Diffstat (limited to 'storage/xtradb')
-rw-r--r--  storage/xtradb/CMakeLists.txt | 63
-rw-r--r--  storage/xtradb/api/api0api.cc | 23
-rw-r--r--  storage/xtradb/btr/btr0cur.cc | 64
-rw-r--r--  storage/xtradb/btr/btr0sea.cc | 6
-rw-r--r--  storage/xtradb/buf/buf0buf.cc | 50
-rw-r--r--  storage/xtradb/buf/buf0flu.cc | 13
-rw-r--r--  storage/xtradb/buf/buf0lru.cc | 51
-rw-r--r--  storage/xtradb/dict/dict0crea.cc | 44
-rw-r--r--  storage/xtradb/dict/dict0dict.cc | 477
-rw-r--r--  storage/xtradb/dict/dict0mem.cc | 45
-rw-r--r--  storage/xtradb/dict/dict0stats.cc | 611
-rw-r--r--  storage/xtradb/fil/fil0fil.cc | 69
-rw-r--r--  storage/xtradb/fts/fts0ast.cc | 136
-rw-r--r--  storage/xtradb/fts/fts0blex.cc | 8
-rw-r--r--  storage/xtradb/fts/fts0blex.l | 8
-rw-r--r--  storage/xtradb/fts/fts0fts.cc | 111
-rw-r--r--  storage/xtradb/fts/fts0opt.cc | 2
-rw-r--r--  storage/xtradb/fts/fts0pars.cc | 51
-rw-r--r--  storage/xtradb/fts/fts0pars.y | 22
-rw-r--r--  storage/xtradb/fts/fts0que.cc | 57
-rw-r--r--  storage/xtradb/fts/fts0tlex.cc | 6
-rw-r--r--  storage/xtradb/fts/fts0tlex.l | 6
-rw-r--r--  storage/xtradb/handler/ha_innodb.cc | 269
-rw-r--r--  storage/xtradb/handler/ha_innodb.h | 1
-rw-r--r--  storage/xtradb/handler/handler0alter.cc | 73
-rw-r--r--  storage/xtradb/handler/i_s.cc | 9
-rw-r--r--  storage/xtradb/include/btr0cur.h | 13
-rw-r--r--  storage/xtradb/include/buf0buf.h | 11
-rw-r--r--  storage/xtradb/include/buf0buf.ic | 11
-rw-r--r--  storage/xtradb/include/dict0crea.h | 34
-rw-r--r--  storage/xtradb/include/dict0dict.h | 27
-rw-r--r--  storage/xtradb/include/dict0mem.h | 147
-rw-r--r--  storage/xtradb/include/fil0fil.h | 12
-rw-r--r--  storage/xtradb/include/fts0ast.h | 69
-rw-r--r--  storage/xtradb/include/fts0fts.h | 1
-rw-r--r--  storage/xtradb/include/fts0pars.h | 6
-rw-r--r--  storage/xtradb/include/lock0lock.h | 2
-rw-r--r--  storage/xtradb/include/log0log.h | 34
-rw-r--r--  storage/xtradb/include/log0log.ic | 56
-rw-r--r--  storage/xtradb/include/os0file.h | 6
-rw-r--r--  storage/xtradb/include/os0once.h | 125
-rw-r--r--  storage/xtradb/include/os0sync.h | 79
-rw-r--r--  storage/xtradb/include/read0read.h | 21
-rw-r--r--  storage/xtradb/include/srv0srv.h | 15
-rw-r--r--  storage/xtradb/include/sync0rw.h | 12
-rw-r--r--  storage/xtradb/include/sync0rw.ic | 17
-rw-r--r--  storage/xtradb/include/sync0sync.h | 2
-rw-r--r--  storage/xtradb/include/sync0sync.ic | 12
-rw-r--r--  storage/xtradb/include/trx0trx.h | 16
-rw-r--r--  storage/xtradb/include/univ.i | 12
-rw-r--r--  storage/xtradb/lock/lock0lock.cc | 171
-rw-r--r--  storage/xtradb/log/log0log.cc | 71
-rw-r--r--  storage/xtradb/log/log0online.cc | 35
-rw-r--r--  storage/xtradb/log/log0recv.cc | 20
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/alter_tablespace.opt | 2
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/autoinc_secondary.rdiff | 30
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/cache_index.rdiff | 71
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/checksum_table_live.rdiff | 13
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/col_opt_not_null.opt | 1
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/col_opt_null.opt | 1
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/define_engine.inc | 49
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/disabled.def | 8
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/fulltext_search.rdiff | 49
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/index_enable_disable.rdiff | 33
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/index_type_hash.rdiff | 60
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/insert_delayed.rdiff | 26
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/lock_concurrent.rdiff | 22
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/optimize_table.rdiff | 37
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/parts/checksum_table.rdiff | 13
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/parts/create_table.rdiff | 20
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/parts/disabled.def | 1
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/parts/optimize_table.rdiff | 58
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/parts/repair_table.rdiff | 158
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/parts/suite.opt | 2
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/repair_table.rdiff | 138
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/suite.opt | 1
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff | 23
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/tbl_opt_insert_method.rdiff | 11
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/tbl_opt_key_block_size.opt | 3
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/tbl_opt_row_format.opt | 3
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/tbl_opt_row_format.rdiff | 10
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/tbl_opt_union.rdiff | 16
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff | 18
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/trx/level_read_committed.rdiff | 11
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff | 11
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/trx/suite.opt | 3
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/type_blob.opt | 1
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/type_char_indexes.rdiff | 11
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/type_float_indexes.rdiff | 11
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/type_spatial_indexes.rdiff | 712
-rw-r--r--  storage/xtradb/mysql-test/storage_engine/type_text.opt | 1
-rw-r--r--  storage/xtradb/os/os0file.cc | 65
-rw-r--r--  storage/xtradb/os/os0stacktrace.cc | 6
-rw-r--r--  storage/xtradb/page/page0zip.cc | 41
-rw-r--r--  storage/xtradb/read/read0read.cc | 4
-rw-r--r--  storage/xtradb/row/row0ins.cc | 42
-rw-r--r--  storage/xtradb/row/row0merge.cc | 2
-rw-r--r--  storage/xtradb/row/row0mysql.cc | 158
-rw-r--r--  storage/xtradb/row/row0sel.cc | 35
-rw-r--r--  storage/xtradb/row/row0upd.cc | 54
-rw-r--r--  storage/xtradb/srv/srv0mon.cc | 4
-rw-r--r--  storage/xtradb/srv/srv0srv.cc | 30
-rw-r--r--  storage/xtradb/srv/srv0start.cc | 45
-rw-r--r--  storage/xtradb/sync/sync0arr.cc | 227
-rw-r--r--  storage/xtradb/sync/sync0rw.cc | 78
-rw-r--r--  storage/xtradb/sync/sync0sync.cc | 28
-rw-r--r--  storage/xtradb/trx/trx0i_s.cc | 2
-rw-r--r--  storage/xtradb/trx/trx0sys.cc | 22
-rw-r--r--  storage/xtradb/trx/trx0trx.cc | 71
109 files changed, 4605 insertions, 1259 deletions
diff --git a/storage/xtradb/CMakeLists.txt b/storage/xtradb/CMakeLists.txt
index e34add61886..093f8f64e20 100644
--- a/storage/xtradb/CMakeLists.txt
+++ b/storage/xtradb/CMakeLists.txt
@@ -88,7 +88,6 @@ IF(NOT CMAKE_CROSSCOMPILING)
long x;
long y;
long res;
- char c;
x = 10;
y = 123;
@@ -109,6 +108,16 @@ IF(NOT CMAKE_CROSSCOMPILING)
if (res != 123 + 10 || x != 123 + 10) {
return(1);
}
+ return(0);
+ }"
+ HAVE_IB_GCC_ATOMIC_BUILTINS
+ )
+ CHECK_C_SOURCE_RUNS(
+ "
+ int main()
+ {
+ long res;
+ char c;
c = 10;
res = __sync_lock_test_and_set(&c, 123);
@@ -117,7 +126,7 @@ IF(NOT CMAKE_CROSSCOMPILING)
}
return(0);
}"
- HAVE_IB_GCC_ATOMIC_BUILTINS
+ HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE
)
CHECK_C_SOURCE_RUNS(
"#include<stdint.h>
@@ -139,6 +148,25 @@ IF(NOT CMAKE_CROSSCOMPILING)
}"
HAVE_IB_GCC_ATOMIC_BUILTINS_64
)
+ CHECK_C_SOURCE_RUNS(
+ "#include<stdint.h>
+ int main()
+ {
+ __sync_synchronize();
+ return(0);
+ }"
+ HAVE_IB_GCC_SYNC_SYNCHRONISE
+ )
+ CHECK_C_SOURCE_RUNS(
+ "#include<stdint.h>
+ int main()
+ {
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+ __atomic_thread_fence(__ATOMIC_RELEASE);
+ return(0);
+ }"
+ HAVE_IB_GCC_ATOMIC_THREAD_FENCE
+ )
ENDIF()
IF(HAVE_IB_GCC_ATOMIC_BUILTINS)
@@ -146,10 +174,22 @@ IF(HAVE_IB_GCC_ATOMIC_BUILTINS)
SET(XTRADB_OK 1)
ENDIF()
+IF(HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE)
+ ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS_BYTE=1)
+ENDIF()
+
IF(HAVE_IB_GCC_ATOMIC_BUILTINS_64)
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS_64=1)
ENDIF()
+IF(HAVE_IB_GCC_SYNC_SYNCHRONISE)
+ ADD_DEFINITIONS(-DHAVE_IB_GCC_SYNC_SYNCHRONISE=1)
+ENDIF()
+
+IF(HAVE_IB_GCC_ATOMIC_THREAD_FENCE)
+ ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_THREAD_FENCE=1)
+ENDIF()
+
# either define HAVE_IB_ATOMIC_PTHREAD_T_GCC or not
IF(NOT CMAKE_CROSSCOMPILING)
CHECK_C_SOURCE_RUNS(
@@ -232,10 +272,21 @@ IF(CMAKE_SYSTEM_NAME STREQUAL "SunOS")
return(0);
}
" HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS)
+ CHECK_C_SOURCE_COMPILES(
+ "#include <mbarrier.h>
+ int main() {
+ __machine_r_barrier();
+ __machine_w_barrier();
+ return(0);
+ }"
+ HAVE_IB_MACHINE_BARRIER_SOLARIS)
ENDIF()
IF(HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS)
ADD_DEFINITIONS(-DHAVE_IB_ATOMIC_PTHREAD_T_SOLARIS=1)
ENDIF()
+ IF(HAVE_IB_MACHINE_BARRIER_SOLARIS)
+ ADD_DEFINITIONS(-DHAVE_IB_MACHINE_BARRIER_SOLARIS=1)
+ ENDIF()
ENDIF()
@@ -253,14 +304,8 @@ ENDIF()
IF(MSVC)
ADD_DEFINITIONS(-DHAVE_WINDOWS_ATOMICS)
+ ADD_DEFINITIONS(-DHAVE_WINDOWS_MM_FENCE)
SET(XTRADB_OK 1)
-
- # Avoid "unreferenced label" warning in generated file
- GET_FILENAME_COMPONENT(_SRC_DIR ${CMAKE_CURRENT_LIST_FILE} PATH)
- SET_SOURCE_FILES_PROPERTIES(${_SRC_DIR}/pars/pars0grm.c
- PROPERTIES COMPILE_FLAGS "/wd4102")
- SET_SOURCE_FILES_PROPERTIES(${_SRC_DIR}/pars/lexyy.c
- PROPERTIES COMPILE_FLAGS "/wd4003")
ENDIF()
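
A note on the checks above: CHECK_C_SOURCE_RUNS only probes whether the toolchain provides the byte-wide __sync_lock_test_and_set(), __sync_synchronize(), __atomic_thread_fence() and the Solaris/Windows fence equivalents; each successful probe is then mapped to a -DHAVE_... definition by the ADD_DEFINITIONS additions earlier in the same file. A minimal sketch of how such definitions are typically consumed on the C++ side (the os_rmb/os_wmb macro names are an illustrative assumption, not necessarily what os0sync.h uses):

    #if defined(HAVE_IB_GCC_ATOMIC_THREAD_FENCE)
    # define os_rmb __atomic_thread_fence(__ATOMIC_ACQUIRE)
    # define os_wmb __atomic_thread_fence(__ATOMIC_RELEASE)
    #elif defined(HAVE_IB_GCC_SYNC_SYNCHRONISE)
    # define os_rmb __sync_synchronize()
    # define os_wmb __sync_synchronize()
    #elif defined(HAVE_IB_MACHINE_BARRIER_SOLARIS)
    # include <mbarrier.h>
    # define os_rmb __machine_r_barrier()
    # define os_wmb __machine_w_barrier()
    #elif defined(HAVE_WINDOWS_MM_FENCE)
    # include <intrin.h>
    # define os_rmb _mm_lfence()
    # define os_wmb _mm_sfence()
    #else
    /* no fence primitive detected: degrade to a plain no-op */
    # define os_rmb do {} while (0)
    # define os_wmb do {} while (0)
    #endif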
diff --git a/storage/xtradb/api/api0api.cc b/storage/xtradb/api/api0api.cc
index bb65dd82216..a060cbc7270 100644
--- a/storage/xtradb/api/api0api.cc
+++ b/storage/xtradb/api/api0api.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2008, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2008, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -2044,6 +2044,8 @@ ib_cursor_delete_row(
const rec_t* rec;
ib_bool_t page_format;
mtr_t mtr;
+ rec_t* copy = NULL;
+ byte ptr[UNIV_PAGE_SIZE_MAX];
page_format = static_cast<ib_bool_t>(
dict_table_is_comp(index->table));
@@ -2052,16 +2054,27 @@ ib_cursor_delete_row(
if (btr_pcur_restore_position(
BTR_SEARCH_LEAF, pcur, &mtr)) {
+ mem_heap_t* heap = NULL;
+ ulint offsets_[REC_OFFS_NORMAL_SIZE];
+ ulint* offsets = offsets_;
+
+ rec_offs_init(offsets_);
rec = btr_pcur_get_rec(pcur);
- } else {
- rec = NULL;
+
+ /* Since mtr will be committed, the rec
+ will not be protected. Make a copy of
+ the rec. */
+ offsets = rec_get_offsets(
+ rec, index, offsets, ULINT_UNDEFINED, &heap);
+ ut_ad(rec_offs_size(offsets) < UNIV_PAGE_SIZE_MAX);
+ copy = rec_copy(ptr, rec, offsets);
}
mtr_commit(&mtr);
- if (rec && !rec_get_deleted_flag(rec, page_format)) {
- err = ib_delete_row(cursor, pcur, rec);
+ if (copy && !rec_get_deleted_flag(copy, page_format)) {
+ err = ib_delete_row(cursor, pcur, copy);
} else {
err = DB_RECORD_NOT_FOUND;
}
diff --git a/storage/xtradb/btr/btr0cur.cc b/storage/xtradb/btr/btr0cur.cc
index 5e0473daa85..5d8b5c04d68 100644
--- a/storage/xtradb/btr/btr0cur.cc
+++ b/storage/xtradb/btr/btr0cur.cc
@@ -202,15 +202,6 @@ btr_rec_free_externally_stored_fields(
mtr_t* mtr); /*!< in: mini-transaction handle which contains
an X-latch to record page and to the index
tree */
-/***********************************************************//**
-Gets the externally stored size of a record, in units of a database page.
-@return externally stored part, in units of a database page */
-static
-ulint
-btr_rec_get_externally_stored_len(
-/*==============================*/
- const rec_t* rec, /*!< in: record */
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
#endif /* !UNIV_HOTBACKUP */
/******************************************************//**
@@ -2743,6 +2734,31 @@ make_external:
goto return_after_reservations;
}
+ if (big_rec_vec) {
+ const ulint redo_10p = srv_log_file_size * UNIV_PAGE_SIZE / 10;
+ ulint total_blob_len = 0;
+
+ /* Calculate the total number of bytes for blob data */
+ for (ulint i = 0; i < big_rec_vec->n_fields; i++) {
+ total_blob_len += big_rec_vec->fields[i].len;
+ }
+
+ if (total_blob_len > redo_10p) {
+ ib_logf(IB_LOG_LEVEL_ERROR, "The total blob data"
+ " length (" ULINTPF ") is greater than"
+ " 10%% of the redo log file size (" UINT64PF
+ "). Please increase innodb_log_file_size.",
+ total_blob_len, srv_log_file_size);
+ if (n_reserved > 0) {
+ fil_space_release_free_extents(
+ index->space, n_reserved);
+ }
+
+ err = DB_TOO_BIG_RECORD;
+ goto err_exit;
+ }
+ }
+
/* Store state of explicit locks on rec on the page infimum record,
before deleting rec. The page infimum acts as a dummy carrier of the
locks, taking care also of lock releases, before we can move the locks
@@ -4238,15 +4254,15 @@ btr_rec_get_field_ref_offs(
#define btr_rec_get_field_ref(rec, offsets, n) \
((rec) + btr_rec_get_field_ref_offs(offsets, n))
-/***********************************************************//**
-Gets the externally stored size of a record, in units of a database page.
+/** Gets the externally stored size of a record, in units of a database page.
+@param[in] rec record
+@param[in] offsets array returned by rec_get_offsets()
@return externally stored part, in units of a database page */
-static
+
ulint
btr_rec_get_externally_stored_len(
-/*==============================*/
- const rec_t* rec, /*!< in: record */
- const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ const rec_t* rec,
+ const ulint* offsets)
{
ulint n_fields;
ulint total_extern_len = 0;
@@ -4593,6 +4609,7 @@ btr_store_big_rec_extern_fields(
buf_block_t** freed_pages = NULL;
ulint n_freed_pages = 0;
dberr_t error = DB_SUCCESS;
+ ulint total_blob_len = 0;
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(rec_offs_any_extern(offsets));
@@ -4612,6 +4629,23 @@ btr_store_big_rec_extern_fields(
rec_page_no = buf_block_get_page_no(rec_block);
ut_a(fil_page_get_type(page_align(rec)) == FIL_PAGE_INDEX);
+ const ulint redo_10p = (srv_log_file_size * UNIV_PAGE_SIZE / 10);
+
+ /* Calculate the total number of bytes for blob data */
+ for (ulint i = 0; i < big_rec_vec->n_fields; i++) {
+ total_blob_len += big_rec_vec->fields[i].len;
+ }
+
+ if (total_blob_len > redo_10p) {
+ ut_ad(op == BTR_STORE_INSERT);
+ ib_logf(IB_LOG_LEVEL_ERROR, "The total blob data length"
+ " (" ULINTPF ") is greater than 10%% of the"
+ " redo log file size (" UINT64PF "). Please"
+ " increase innodb_log_file_size.",
+ total_blob_len, srv_log_file_size);
+ return(DB_TOO_BIG_RECORD);
+ }
+
if (page_zip) {
int err;
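
Both btr0cur.cc hunks above add the same guard: a record whose off-page (BLOB) columns alone would exceed 10% of one redo log file is rejected with DB_TOO_BIG_RECORD instead of being allowed to overflow the log during the mini-transaction (srv_log_file_size is evidently kept in pages here, hence the multiplication by UNIV_PAGE_SIZE). A self-contained sketch of the arithmetic, with hypothetical names:

    #include <cstddef>
    #include <vector>

    /* Returns true when the externally stored columns of one record fit
    within 10% of a single redo log file. */
    static bool
    blob_fits_in_redo(const std::vector<unsigned long>& blob_field_lens,
                      unsigned long log_file_size_bytes)
    {
            unsigned long total_blob_len = 0;

            for (std::size_t i = 0; i < blob_field_lens.size(); i++) {
                    total_blob_len += blob_field_lens[i];
            }

            return(total_blob_len <= log_file_size_bytes / 10);
    }

With the MySQL 5.6 default innodb_log_file_size of 48 MB this caps the off-page data of a single record at roughly 4.8 MB, which is why the error message suggests raising innodb_log_file_size.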
diff --git a/storage/xtradb/btr/btr0sea.cc b/storage/xtradb/btr/btr0sea.cc
index eec3c0b29aa..ac5e9aec67b 100644
--- a/storage/xtradb/btr/btr0sea.cc
+++ b/storage/xtradb/btr/btr0sea.cc
@@ -1944,7 +1944,10 @@ btr_search_validate_one_table(
buf_pool_t* buf_pool;
index_id_t page_index_id;
- buf_pool = buf_pool_from_bpage((buf_page_t*) block);
+ buf_pool = buf_pool_from_bpage((buf_page_t *) block);
+ /* Prevent BUF_BLOCK_FILE_PAGE -> BUF_BLOCK_REMOVE_HASH
+ transition until we lock the block mutex */
+ mutex_enter(&buf_pool->LRU_list_mutex);
if (UNIV_LIKELY(buf_block_get_state(block)
== BUF_BLOCK_FILE_PAGE)) {
@@ -1980,6 +1983,7 @@ btr_search_validate_one_table(
}
mutex_enter(&block->mutex);
+ mutex_exit(&buf_pool->LRU_list_mutex);
ut_a(!dict_index_is_ibuf(block->index));
diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc
index 359b15f4a6b..b27178fa8c8 100644
--- a/storage/xtradb/buf/buf0buf.cc
+++ b/storage/xtradb/buf/buf0buf.cc
@@ -380,6 +380,48 @@ buf_pool_get_oldest_modification(void)
}
/********************************************************************//**
+Gets the smallest oldest_modification lsn for any page in the pool. Returns
+zero if all modified pages have been flushed to disk.
+@return oldest modification in pool, zero if none */
+UNIV_INTERN
+lsn_t
+buf_pool_get_oldest_modification_peek(void)
+/*=======================================*/
+{
+ ulint i;
+ buf_page_t* bpage;
+ lsn_t lsn = 0;
+ lsn_t oldest_lsn = 0;
+
+ /* Dirty read of the buffer pool array */
+ for (i = 0; i < srv_buf_pool_instances; i++) {
+ buf_pool_t* buf_pool;
+
+ buf_pool = buf_pool_from_array(i);
+
+ buf_flush_list_mutex_enter(buf_pool);
+
+ bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
+
+ if (bpage != NULL) {
+ ut_ad(bpage->in_flush_list);
+ lsn = bpage->oldest_modification;
+ }
+
+ buf_flush_list_mutex_exit(buf_pool);
+
+ if (!oldest_lsn || oldest_lsn > lsn) {
+ oldest_lsn = lsn;
+ }
+ }
+
+ /* The returned answer may be out of date: the flush_list can
+ change after the mutex has been released. */
+
+ return(oldest_lsn);
+}
+
+/********************************************************************//**
Get total buffer pool statistics. */
UNIV_INTERN
void
@@ -2996,12 +3038,6 @@ got_block:
ut_ad(buf_block_get_state(fix_block) == BUF_BLOCK_FILE_PAGE);
-#if UNIV_WORD_SIZE == 4
- /* On 32-bit systems, there is no padding in buf_page_t. On
- other systems, Valgrind could complain about uninitialized pad
- bytes. */
- UNIV_MEM_ASSERT_RW(&fix_block->page, sizeof(fix_block->page));
-#endif
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
if ((mode == BUF_GET_IF_IN_POOL || mode == BUF_GET_IF_IN_POOL_OR_WATCH)
@@ -5635,7 +5671,7 @@ buf_get_free_list_len(void)
#else /* !UNIV_HOTBACKUP */
/********************************************************************//**
-Inits a page to the buffer buf_pool, for use in ibbackup --restore. */
+Inits a page to the buffer buf_pool, for use in mysqlbackup --restore. */
UNIV_INTERN
void
buf_page_init_for_backup_restore(
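
Unlike buf_pool_get_oldest_modification(), the new _peek variant samples each buffer pool instance's flush list under only that instance's flush-list mutex and explicitly tolerates a stale answer. A sketch of the monitoring-style caller this is intended for (the helper below is hypothetical; log_get_lsn() is assumed to be the accessor for the current LSN):

    /* Approximate checkpoint age for status reporting; exactness is not
    required here, so the cheap peek variant is good enough. */
    static lsn_t
    approx_checkpoint_age(void)
    {
            lsn_t   oldest = buf_pool_get_oldest_modification_peek();

            return(oldest != 0 ? log_get_lsn() - oldest : 0);
    }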
diff --git a/storage/xtradb/buf/buf0flu.cc b/storage/xtradb/buf/buf0flu.cc
index 57db9fd367c..ac614823bc1 100644
--- a/storage/xtradb/buf/buf0flu.cc
+++ b/storage/xtradb/buf/buf0flu.cc
@@ -542,7 +542,7 @@ buf_flush_ready_for_flush(
ut_ad(flush_type < BUF_FLUSH_N_TYPES);
ut_ad(mutex_own(buf_page_get_mutex(bpage))
|| flush_type == BUF_FLUSH_LIST);
- ut_a(buf_page_in_file(bpage));
+ ut_a(buf_page_in_file(bpage) || buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH);
if (bpage->oldest_modification == 0
|| buf_page_get_io_fix_unlocked(bpage) != BUF_IO_NONE) {
@@ -553,6 +553,7 @@ buf_flush_ready_for_flush(
switch (flush_type) {
case BUF_FLUSH_LIST:
+ return(buf_page_get_state(bpage) != BUF_BLOCK_REMOVE_HASH);
case BUF_FLUSH_LRU:
case BUF_FLUSH_SINGLE_PAGE:
return(true);
@@ -1377,7 +1378,8 @@ buf_flush_page_and_try_neighbors(
}
ut_a(buf_page_in_file(bpage)
- || buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH);
+ || (buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH
+ ));
if (buf_flush_ready_for_flush(bpage, flush_type)) {
buf_pool_t* buf_pool;
@@ -1663,7 +1665,7 @@ buf_do_LRU_batch(
{
if (buf_LRU_evict_from_unzip_LRU(buf_pool)) {
n->unzip_LRU_evicted
- += buf_free_from_unzip_LRU_list_batch(buf_pool, max);
+ = buf_free_from_unzip_LRU_list_batch(buf_pool, max);
} else {
n->unzip_LRU_evicted = 0;
}
@@ -1981,6 +1983,7 @@ buf_flush_LRU(
if (!buf_flush_start(buf_pool, BUF_FLUSH_LRU)) {
n->flushed = 0;
n->evicted = 0;
+ n->unzip_LRU_evicted = 0;
return(false);
}
@@ -2407,6 +2410,10 @@ af_get_pct_for_dirty()
{
ulint dirty_pct = buf_get_modified_ratio_pct();
+ if (dirty_pct > 0 && srv_max_buf_pool_modified_pct == 0) {
+ return(100);
+ }
+
ut_a(srv_max_dirty_pages_pct_lwm
<= srv_max_buf_pool_modified_pct);
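
The early return added to af_get_pct_for_dirty() changes what adaptive flushing sees when innodb_max_dirty_pages_pct is set to 0: any dirty page at all is now reported as 100% pressure instead of falling through to the water-mark comparisons further down. A standalone sketch of just that guard (the function name and the pass-through branch are illustrative):

    /* Dirty-page pressure, as a percentage, for the adaptive flushing loop. */
    static unsigned long
    pct_for_dirty(unsigned long dirty_pct, unsigned long max_modified_pct)
    {
            if (dirty_pct > 0 && max_modified_pct == 0) {
                    /* "keep no dirty pages": report full pressure as soon
                    as anything at all is dirty */
                    return(100);
            }

            /* otherwise hand the raw ratio on to the normal
            low/high water-mark logic */
            return(dirty_pct);
    }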
diff --git a/storage/xtradb/buf/buf0lru.cc b/storage/xtradb/buf/buf0lru.cc
index d0904f4b8ad..3b0319e4e79 100644
--- a/storage/xtradb/buf/buf0lru.cc
+++ b/storage/xtradb/buf/buf0lru.cc
@@ -595,6 +595,8 @@ buf_flush_or_remove_pages(
buf_page_t* bpage;
ulint processed = 0;
+ ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+
buf_flush_list_mutex_enter(buf_pool);
rescan:
@@ -1971,13 +1973,6 @@ buf_LRU_free_page(
ut_ad(buf_page_in_file(bpage));
ut_ad(bpage->in_LRU_list);
-#if UNIV_WORD_SIZE == 4
- /* On 32-bit systems, there is no padding in buf_page_t. On
- other systems, Valgrind could complain about uninitialized pad
- bytes. */
- UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);
-#endif
-
if (!buf_page_can_relocate(bpage)) {
/* Do not free buffer fixed or I/O-fixed blocks. */
@@ -2010,12 +2005,6 @@ buf_LRU_free_page(
ut_ad(buf_page_in_file(bpage));
ut_ad(bpage->in_LRU_list);
ut_ad(!bpage->in_flush_list == !bpage->oldest_modification);
-#if UNIV_WORD_SIZE == 4
- /* On 32-bit systems, there is no padding in buf_page_t. On
- other systems, Valgrind could complain about uninitialized pad
- bytes. */
- UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);
-#endif
#ifdef UNIV_DEBUG
if (buf_debug_prints) {
@@ -2121,13 +2110,6 @@ not_freed:
ut_ad(prev_b->in_LRU_list);
ut_ad(buf_page_in_file(prev_b));
-#if UNIV_WORD_SIZE == 4
- /* On 32-bit systems, there is no
- padding in buf_page_t. On other
- systems, Valgrind could complain about
- uninitialized pad bytes. */
- UNIV_MEM_ASSERT_RW(prev_b, sizeof *prev_b);
-#endif
UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU,
prev_b, b);
@@ -2338,13 +2320,6 @@ buf_LRU_block_remove_hashed(
ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
ut_a(bpage->buf_fix_count == 0);
-#if UNIV_WORD_SIZE == 4
- /* On 32-bit systems, there is no padding in
- buf_page_t. On other systems, Valgrind could complain
- about uninitialized pad bytes. */
- UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);
-#endif
-
buf_LRU_remove_block(bpage);
buf_pool->freed_page_clock += 1;
@@ -2429,6 +2404,25 @@ buf_LRU_block_remove_hashed(
" in the hash table\n",
(ulong) bpage->space,
(ulong) bpage->offset);
+
+#ifdef UNIV_DEBUG
+ fprintf(stderr,
+ "InnoDB: in_page_hash %lu in_zip_hash %lu\n"
+ " in_free_list %lu in_flush_list %lu in_LRU_list %lu\n"
+ " zip.data %p zip_size %lu page_state %d\n",
+ bpage->in_page_hash, bpage->in_zip_hash,
+ bpage->in_free_list, bpage->in_flush_list,
+ bpage->in_LRU_list, bpage->zip.data,
+ buf_page_get_zip_size(bpage),
+ buf_page_get_state(bpage));
+#else
+ fprintf(stderr,
+ "InnoDB: zip.data %p zip_size %lu page_state %d\n",
+ bpage->zip.data,
+ buf_page_get_zip_size(bpage),
+ buf_page_get_state(bpage));
+#endif
+
if (hashed_bpage) {
fprintf(stderr,
"InnoDB: In hash table we find block"
@@ -2439,6 +2433,9 @@ buf_LRU_block_remove_hashed(
(const void*) bpage);
}
+ ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
+ ut_a(bpage->buf_fix_count == 0);
+
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
mutex_exit(buf_page_get_mutex(bpage));
rw_lock_x_unlock(hash_lock);
diff --git a/storage/xtradb/dict/dict0crea.cc b/storage/xtradb/dict/dict0crea.cc
index ff892749d4f..30523ff2af4 100644
--- a/storage/xtradb/dict/dict0crea.cc
+++ b/storage/xtradb/dict/dict0crea.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1611,26 +1611,25 @@ dict_create_add_foreign_to_dictionary(
return(error);
}
-/********************************************************************//**
-Adds foreign key definitions to data dictionary tables in the database.
-@return error code or DB_SUCCESS */
+/** Adds the given set of foreign key objects to the dictionary tables
+in the database. This function does not modify the dictionary cache. The
+caller must ensure that all foreign key objects contain a valid constraint
+name in foreign->id.
+@param[in] local_fk_set set of foreign key objects, to be added to
+the dictionary tables
+@param[in] table table to which the foreign key objects in
+local_fk_set belong
+@param[in,out] trx transaction
+@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
dict_create_add_foreigns_to_dictionary(
/*===================================*/
- ulint start_id,/*!< in: if we are actually doing ALTER TABLE
- ADD CONSTRAINT, we want to generate constraint
- numbers which are bigger than in the table so
- far; we number the constraints from
- start_id + 1 up; start_id should be set to 0 if
- we are creating a new table, or if the table
- so far has no constraints for which the name
- was generated here */
- dict_table_t* table, /*!< in: table */
- trx_t* trx) /*!< in: transaction */
+ const dict_foreign_set& local_fk_set,
+ const dict_table_t* table,
+ trx_t* trx)
{
dict_foreign_t* foreign;
- ulint number = start_id + 1;
dberr_t error;
ut_ad(mutex_own(&(dict_sys->mutex)));
@@ -1643,17 +1642,12 @@ dict_create_add_foreigns_to_dictionary(
return(DB_ERROR);
}
- for (foreign = UT_LIST_GET_FIRST(table->foreign_list);
- foreign;
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) {
+ for (dict_foreign_set::const_iterator it = local_fk_set.begin();
+ it != local_fk_set.end();
+ ++it) {
- error = dict_create_add_foreign_id(&number, table->name,
- foreign);
-
- if (error != DB_SUCCESS) {
-
- return(error);
- }
+ foreign = *it;
+ ut_ad(foreign->id != NULL);
error = dict_create_add_foreign_to_dictionary(table->name,
foreign, trx);
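
The rewritten interface drops the start_id counter: constraint names are now assigned by the caller before this function runs, and the constraints arrive as a dict_foreign_set instead of being walked off the table's UT_LIST. The requirement that foreign->id be valid suggests the set is keyed on the constraint name; a self-contained sketch of that container pattern, with placeholder types (the real comparator lives in dict0mem.h, which is outside this excerpt):

    #include <cstring>
    #include <set>

    struct foreign_key {
            const char*     id;     /* constraint name, e.g. "db/t1_ibfk_1" */
    };

    /* Strict weak ordering on the constraint name, so a set of foreign_key
    pointers can be searched and deduplicated by name alone. */
    struct foreign_id_less {
            bool operator()(const foreign_key* a, const foreign_key* b) const
            {
                    return(std::strcmp(a->id, b->id) < 0);
            }
    };

    typedef std::set<foreign_key*, foreign_id_less> foreign_set_sketch;

This is also why, in the dict0dict.cc changes below, dict_foreign_find() reduces to a set find() and a duplicate constraint name makes insert() report ret.second == false.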
diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc
index 931f14af312..515a40dff14 100644
--- a/storage/xtradb/dict/dict0dict.cc
+++ b/storage/xtradb/dict/dict0dict.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2013, SkySQL Ab. All Rights Reserved.
@@ -28,6 +28,7 @@ Created 1/8/1996 Heikki Tuuri
#include "dict0dict.h"
#include "fts0fts.h"
#include "fil0fil.h"
+#include <algorithm>
#ifdef UNIV_NONINL
#include "dict0dict.ic"
@@ -51,6 +52,7 @@ UNIV_INTERN dict_index_t* dict_ind_compact;
#include "btr0btr.h"
#include "btr0cur.h"
#include "btr0sea.h"
+#include "os0once.h"
#include "page0zip.h"
#include "page0page.h"
#include "pars0pars.h"
@@ -103,7 +105,7 @@ UNIV_INTERN ulong zip_pad_max = 50;
UNIV_INTERN mysql_pfs_key_t dict_operation_lock_key;
UNIV_INTERN mysql_pfs_key_t index_tree_rw_lock_key;
UNIV_INTERN mysql_pfs_key_t index_online_log_key;
-UNIV_INTERN mysql_pfs_key_t dict_table_stats_latch_key;
+UNIV_INTERN mysql_pfs_key_t dict_table_stats_key;
#endif /* UNIV_PFS_RWLOCK */
#ifdef UNIV_PFS_MUTEX
@@ -122,6 +124,11 @@ UNIV_INTERN mysql_pfs_key_t dict_foreign_err_mutex_key;
/** Identifies generated InnoDB foreign key names */
static char dict_ibfk[] = "_ibfk_";
+bool innodb_table_stats_not_found = false;
+bool innodb_index_stats_not_found = false;
+static bool innodb_table_stats_not_found_reported = false;
+static bool innodb_index_stats_not_found_reported = false;
+
/*******************************************************************//**
Tries to find column names for the index and sets the col field of the
index.
@@ -320,6 +327,82 @@ dict_mutex_exit_for_mysql(void)
mutex_exit(&(dict_sys->mutex));
}
+/** Allocate and init a dict_table_t's stats latch.
+This function must not be called concurrently on the same table object.
+@param[in,out] table_void table whose stats latch to create */
+static
+void
+dict_table_stats_latch_alloc(
+ void* table_void)
+{
+ dict_table_t* table = static_cast<dict_table_t*>(table_void);
+
+ table->stats_latch = new(std::nothrow) rw_lock_t;
+
+ ut_a(table->stats_latch != NULL);
+
+ rw_lock_create(dict_table_stats_key, table->stats_latch,
+ SYNC_INDEX_TREE);
+}
+
+/** Deinit and free a dict_table_t's stats latch.
+This function must not be called concurrently on the same table object.
+@param[in,out] table table whose stats latch to free */
+static
+void
+dict_table_stats_latch_free(
+ dict_table_t* table)
+{
+ rw_lock_free(table->stats_latch);
+ delete table->stats_latch;
+}
+
+/** Create a dict_table_t's stats latch or delay for lazy creation.
+This function is only called either from a single-threaded environment
+or from a thread that has not shared the table object with other threads.
+@param[in,out] table table whose stats latch to create
+@param[in] enabled if false then the latch is disabled
+and dict_table_stats_lock()/unlock() become no-ops on this table. */
+
+void
+dict_table_stats_latch_create(
+ dict_table_t* table,
+ bool enabled)
+{
+ if (!enabled) {
+ table->stats_latch = NULL;
+ table->stats_latch_created = os_once::DONE;
+ return;
+ }
+
+#ifdef HAVE_ATOMIC_BUILTINS
+ /* We create this lazily the first time it is used. */
+ table->stats_latch = NULL;
+ table->stats_latch_created = os_once::NEVER_DONE;
+#else /* HAVE_ATOMIC_BUILTINS */
+
+ dict_table_stats_latch_alloc(table);
+
+ table->stats_latch_created = os_once::DONE;
+#endif /* HAVE_ATOMIC_BUILTINS */
+}
+
+/** Destroy a dict_table_t's stats latch.
+This function is only called either from a single-threaded environment
+or from a thread that has not shared the table object with other threads.
+@param[in,out] table table whose stats latch to destroy */
+
+void
+dict_table_stats_latch_destroy(
+ dict_table_t* table)
+{
+ if (table->stats_latch_created == os_once::DONE
+ && table->stats_latch != NULL) {
+
+ dict_table_stats_latch_free(table);
+ }
+}
+
/**********************************************************************//**
Lock the appropriate latch to protect a given table's statistics. */
UNIV_INTERN
@@ -332,6 +415,14 @@ dict_table_stats_lock(
ut_ad(table != NULL);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
+#ifdef HAVE_ATOMIC_BUILTINS
+ os_once::do_or_wait_for_done(
+ &table->stats_latch_created,
+ dict_table_stats_latch_alloc, table);
+#else /* HAVE_ATOMIC_BUILTINS */
+ ut_ad(table->stats_latch_created == os_once::DONE);
+#endif /* HAVE_ATOMIC_BUILTINS */
+
if (table->stats_latch == NULL) {
/* This is a dummy table object that is private in the current
thread and is not shared between multiple threads, thus we
@@ -1163,8 +1254,8 @@ dict_table_can_be_evicted(
#endif /* UNIV_SYNC_DEBUG */
ut_a(table->can_be_evicted);
- ut_a(UT_LIST_GET_LEN(table->foreign_list) == 0);
- ut_a(UT_LIST_GET_LEN(table->referenced_list) == 0);
+ ut_a(table->foreign_set.empty());
+ ut_a(table->referenced_set.empty());
if (table->n_ref_count == 0) {
dict_index_t* index;
@@ -1380,6 +1471,22 @@ dict_index_find_on_id_low(
return(NULL);
}
+/** Function object to remove a foreign key constraint from the
+referenced_set of the referenced table. The foreign key object is
+also removed from the dictionary cache. The foreign key constraint
+is not removed from the foreign_set of the table containing the
+constraint. */
+struct dict_foreign_remove_partial
+{
+ void operator()(dict_foreign_t* foreign) {
+ dict_table_t* table = foreign->referenced_table;
+ if (table != NULL) {
+ table->referenced_set.erase(foreign);
+ }
+ dict_foreign_free(foreign);
+ }
+};
+
/**********************************************************************//**
Renames a table object.
@return TRUE if success */
@@ -1554,27 +1661,25 @@ dict_table_rename_in_cache(
system tables through a call of dict_load_foreigns. */
/* Remove the foreign constraints from the cache */
- foreign = UT_LIST_GET_LAST(table->foreign_list);
-
- while (foreign != NULL) {
- dict_foreign_remove_from_cache(foreign);
- foreign = UT_LIST_GET_LAST(table->foreign_list);
- }
+ std::for_each(table->foreign_set.begin(),
+ table->foreign_set.end(),
+ dict_foreign_remove_partial());
+ table->foreign_set.clear();
/* Reset table field in referencing constraints */
+ for (dict_foreign_set::iterator it
+ = table->referenced_set.begin();
+ it != table->referenced_set.end();
+ ++it) {
- foreign = UT_LIST_GET_FIRST(table->referenced_list);
-
- while (foreign != NULL) {
+ foreign = *it;
foreign->referenced_table = NULL;
foreign->referenced_index = NULL;
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
}
- /* Make the list of referencing constraints empty */
-
- UT_LIST_INIT(table->referenced_list);
+ /* Make the set of referencing constraints empty */
+ table->referenced_set.clear();
return(DB_SUCCESS);
}
@@ -1583,9 +1688,19 @@ dict_table_rename_in_cache(
the constraint id of new format >= 4.0.18 constraints. Note that at
this point we have already changed table->name to the new name. */
- foreign = UT_LIST_GET_FIRST(table->foreign_list);
+ dict_foreign_set fk_set;
+
+ for (;;) {
+
+ dict_foreign_set::iterator it
+ = table->foreign_set.begin();
+
+ if (it == table->foreign_set.end()) {
+ break;
+ }
+
+ foreign = *it;
- while (foreign != NULL) {
if (ut_strlen(foreign->foreign_table_name)
< ut_strlen(table->name)) {
/* Allocate a longer name buffer;
@@ -1735,12 +1850,18 @@ dict_table_rename_in_cache(
mem_free(old_id);
}
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
+ table->foreign_set.erase(it);
+ fk_set.insert(foreign);
}
- for (foreign = UT_LIST_GET_FIRST(table->referenced_list);
- foreign != NULL;
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
+ ut_a(table->foreign_set.empty());
+ table->foreign_set.swap(fk_set);
+
+ for (dict_foreign_set::iterator it = table->referenced_set.begin();
+ it != table->referenced_set.end();
+ ++it) {
+
+ foreign = *it;
if (ut_strlen(foreign->referenced_table_name)
< ut_strlen(table->name)) {
@@ -1810,27 +1931,17 @@ dict_table_remove_from_cache_low(
ut_ad(mutex_own(&(dict_sys->mutex)));
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
-#if 0
- fputs("Removing table ", stderr);
- ut_print_name(stderr, table->name, ULINT_UNDEFINED);
- fputs(" from dictionary cache\n", stderr);
-#endif
-
/* Remove the foreign constraints from the cache */
-
- for (foreign = UT_LIST_GET_LAST(table->foreign_list);
- foreign != NULL;
- foreign = UT_LIST_GET_LAST(table->foreign_list)) {
-
- dict_foreign_remove_from_cache(foreign);
- }
+ std::for_each(table->foreign_set.begin(), table->foreign_set.end(),
+ dict_foreign_remove_partial());
+ table->foreign_set.clear();
/* Reset table field in referencing constraints */
+ for (dict_foreign_set::iterator it = table->referenced_set.begin();
+ it != table->referenced_set.end();
+ ++it) {
- for (foreign = UT_LIST_GET_FIRST(table->referenced_list);
- foreign != NULL;
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
-
+ foreign = *it;
foreign->referenced_table = NULL;
foreign->referenced_index = NULL;
}
@@ -3057,7 +3168,7 @@ dict_table_is_referenced_by_foreign_key(
/*====================================*/
const dict_table_t* table) /*!< in: InnoDB table */
{
- return(UT_LIST_GET_LEN(table->referenced_list) > 0);
+ return(!table->referenced_set.empty());
}
/*********************************************************************//**
@@ -3077,9 +3188,11 @@ dict_table_get_referenced_constraint(
ut_ad(index != NULL);
ut_ad(table != NULL);
- for (foreign = UT_LIST_GET_FIRST(table->referenced_list);
- foreign;
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
+ for (dict_foreign_set::iterator it = table->referenced_set.begin();
+ it != table->referenced_set.end();
+ ++it) {
+
+ foreign = *it;
if (foreign->referenced_index == index) {
@@ -3108,9 +3221,11 @@ dict_table_get_foreign_constraint(
ut_ad(index != NULL);
ut_ad(table != NULL);
- for (foreign = UT_LIST_GET_FIRST(table->foreign_list);
- foreign;
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) {
+ for (dict_foreign_set::iterator it = table->foreign_set.begin();
+ it != table->foreign_set.end();
+ ++it) {
+
+ foreign = *it;
if (foreign->foreign_index == index) {
@@ -3121,17 +3236,6 @@ dict_table_get_foreign_constraint(
return(NULL);
}
-/*********************************************************************//**
-Frees a foreign key struct. */
-UNIV_INTERN
-void
-dict_foreign_free(
-/*==============*/
- dict_foreign_t* foreign) /*!< in, own: foreign key struct */
-{
- mem_heap_free(foreign->heap);
-}
-
/**********************************************************************//**
Removes a foreign constraint struct from the dictionary cache. */
UNIV_INTERN
@@ -3143,16 +3247,12 @@ dict_foreign_remove_from_cache(
ut_ad(mutex_own(&(dict_sys->mutex)));
ut_a(foreign);
- if (foreign->referenced_table) {
- UT_LIST_REMOVE(referenced_list,
- foreign->referenced_table->referenced_list,
- foreign);
+ if (foreign->referenced_table != NULL) {
+ foreign->referenced_table->referenced_set.erase(foreign);
}
- if (foreign->foreign_table) {
- UT_LIST_REMOVE(foreign_list,
- foreign->foreign_table->foreign_list,
- foreign);
+ if (foreign->foreign_table != NULL) {
+ foreign->foreign_table->foreign_set.erase(foreign);
}
dict_foreign_free(foreign);
@@ -3166,33 +3266,21 @@ static
dict_foreign_t*
dict_foreign_find(
/*==============*/
- dict_table_t* table, /*!< in: table object */
- const char* id) /*!< in: foreign constraint id */
+ dict_table_t* table, /*!< in: table object */
+ dict_foreign_t* foreign) /*!< in: foreign constraint */
{
- dict_foreign_t* foreign;
-
ut_ad(mutex_own(&(dict_sys->mutex)));
- foreign = UT_LIST_GET_FIRST(table->foreign_list);
-
- while (foreign) {
- if (ut_strcmp(id, foreign->id) == 0) {
-
- return(foreign);
- }
+ dict_foreign_set::iterator it = table->foreign_set.find(foreign);
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
+ if (it != table->foreign_set.end()) {
+ return(*it);
}
- foreign = UT_LIST_GET_FIRST(table->referenced_list);
-
- while (foreign) {
- if (ut_strcmp(id, foreign->id) == 0) {
-
- return(foreign);
- }
+ it = table->referenced_set.find(foreign);
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
+ if (it != table->referenced_set.end()) {
+ return(*it);
}
return(NULL);
@@ -3352,11 +3440,11 @@ dict_foreign_add_to_cache(
ut_a(for_table || ref_table);
if (for_table) {
- for_in_cache = dict_foreign_find(for_table, foreign->id);
+ for_in_cache = dict_foreign_find(for_table, foreign);
}
if (!for_in_cache && ref_table) {
- for_in_cache = dict_foreign_find(ref_table, foreign->id);
+ for_in_cache = dict_foreign_find(ref_table, foreign);
}
if (for_in_cache) {
@@ -3393,9 +3481,12 @@ dict_foreign_add_to_cache(
for_in_cache->referenced_table = ref_table;
for_in_cache->referenced_index = index;
- UT_LIST_ADD_LAST(referenced_list,
- ref_table->referenced_list,
- for_in_cache);
+
+ std::pair<dict_foreign_set::iterator, bool> ret
+ = ref_table->referenced_set.insert(for_in_cache);
+
+ ut_a(ret.second); /* second is true if the insertion
+ took place */
added_to_referenced_list = TRUE;
}
@@ -3424,10 +3515,13 @@ dict_foreign_add_to_cache(
if (for_in_cache == foreign) {
if (added_to_referenced_list) {
- UT_LIST_REMOVE(
- referenced_list,
- ref_table->referenced_list,
- for_in_cache);
+ const dict_foreign_set::size_type n
+ = ref_table->referenced_set
+ .erase(for_in_cache);
+
+ ut_a(n == 1); /* the number of
+ elements removed must
+ be one */
}
mem_heap_free(foreign->heap);
@@ -3438,9 +3532,11 @@ dict_foreign_add_to_cache(
for_in_cache->foreign_table = for_table;
for_in_cache->foreign_index = index;
- UT_LIST_ADD_LAST(foreign_list,
- for_table->foreign_list,
- for_in_cache);
+ std::pair<dict_foreign_set::iterator, bool> ret
+ = for_table->foreign_set.insert(for_in_cache);
+
+ ut_a(ret.second); /* second is true if the insertion
+ took place */
}
/* We need to move the table to the non-LRU end of the table LRU
@@ -4018,9 +4114,12 @@ dict_table_get_highest_foreign_id(
ut_a(table);
len = ut_strlen(table->name);
- foreign = UT_LIST_GET_FIRST(table->foreign_list);
- while (foreign) {
+ for (dict_foreign_set::iterator it = table->foreign_set.begin();
+ it != table->foreign_set.end();
+ ++it) {
+ foreign = *it;
+
if (ut_strlen(foreign->id) > ((sizeof dict_ibfk) - 1) + len
&& 0 == ut_memcmp(foreign->id, table->name, len)
&& 0 == ut_memcmp(foreign->id + len,
@@ -4039,8 +4138,6 @@ dict_table_get_highest_foreign_id(
}
}
}
-
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
}
return(biggest_id);
@@ -4101,6 +4198,7 @@ dict_create_foreign_constraints_low(
dict_table_t* referenced_table;
dict_table_t* table_to_alter;
ulint highest_id_so_far = 0;
+ ulint number = 1;
dict_index_t* index;
dict_foreign_t* foreign;
const char* ptr = sql_string;
@@ -4119,6 +4217,8 @@ dict_create_foreign_constraints_low(
const dict_col_t*columns[500];
const char* column_names[500];
const char* referenced_table_name;
+ dict_foreign_set local_fk_set;
+ dict_foreign_set_free local_fk_set_free(local_fk_set);
ut_ad(!srv_read_only_mode);
ut_ad(mutex_own(&(dict_sys->mutex)));
@@ -4183,6 +4283,7 @@ dict_create_foreign_constraints_low(
table_to_alter);
}
+ number = highest_id_so_far + 1;
/* Scan for foreign key declarations in a loop */
loop:
/* Scan either to "CONSTRAINT" or "FOREIGN", whichever is closer */
@@ -4227,7 +4328,7 @@ loop:
command, determine if there are any foreign keys, and
if so, immediately reject the command if the table is a
temporary one. For now, this kludge will work. */
- if (reject_fks && (UT_LIST_GET_LEN(table->foreign_list) > 0)) {
+ if (reject_fks && !local_fk_set.empty()) {
return(DB_CANNOT_ADD_CONSTRAINT);
}
@@ -4237,7 +4338,17 @@ loop:
to the data dictionary system tables on disk */
error = dict_create_add_foreigns_to_dictionary(
- highest_id_so_far, table, trx);
+ local_fk_set, table, trx);
+
+ if (error == DB_SUCCESS) {
+
+ table->foreign_set.insert(local_fk_set.begin(),
+ local_fk_set.end());
+ std::for_each(local_fk_set.begin(),
+ local_fk_set.end(),
+ dict_foreign_add_to_referenced_table());
+ local_fk_set.clear();
+ }
return(error);
}
@@ -4396,6 +4507,24 @@ col_loop1:
strcpy(foreign->id + db_len + 1, constraint_name);
}
+ if (foreign->id == NULL) {
+ error = dict_create_add_foreign_id(&number,
+ table->name, foreign);
+ if (error != DB_SUCCESS) {
+ dict_foreign_free(foreign);
+ return(error);
+ }
+ }
+
+ std::pair<dict_foreign_set::iterator, bool> ret
+ = local_fk_set.insert(foreign);
+
+ if (!ret.second) {
+ /* A duplicate foreign key name has been found */
+ dict_foreign_free(foreign);
+ return(DB_CANNOT_ADD_CONSTRAINT);
+ }
+
foreign->foreign_table = table;
foreign->foreign_table_name = mem_heap_strdup(
foreign->heap, table->name);
@@ -4421,8 +4550,6 @@ col_loop1:
checking of foreign key constraints! */
if (!success || (!referenced_table && trx->check_foreigns)) {
- dict_foreign_free(foreign);
-
mutex_enter(&dict_foreign_err_mutex);
dict_foreign_error_report_low(ef, name);
fprintf(ef, "%s:\nCannot resolve table name close to:\n"
@@ -4436,7 +4563,6 @@ col_loop1:
ptr = dict_accept(cs, ptr, "(", &success);
if (!success) {
- dict_foreign_free(foreign);
dict_foreign_report_syntax_err(name, start_of_latest_foreign,
ptr);
return(DB_CANNOT_ADD_CONSTRAINT);
@@ -4451,7 +4577,6 @@ col_loop2:
i++;
if (!success) {
- dict_foreign_free(foreign);
mutex_enter(&dict_foreign_err_mutex);
dict_foreign_error_report_low(ef, name);
@@ -4472,7 +4597,6 @@ col_loop2:
ptr = dict_accept(cs, ptr, ")", &success);
if (!success || foreign->n_fields != i) {
- dict_foreign_free(foreign);
dict_foreign_report_syntax_err(name, start_of_latest_foreign,
ptr);
@@ -4498,7 +4622,6 @@ scan_on_conditions:
ptr = dict_accept(cs, ptr, "UPDATE", &success);
if (!success) {
- dict_foreign_free(foreign);
dict_foreign_report_syntax_err(
name, start_of_latest_foreign, ptr);
@@ -4536,7 +4659,6 @@ scan_on_conditions:
ptr = dict_accept(cs, ptr, "ACTION", &success);
if (!success) {
- dict_foreign_free(foreign);
dict_foreign_report_syntax_err(
name, start_of_latest_foreign, ptr);
@@ -4555,7 +4677,6 @@ scan_on_conditions:
ptr = dict_accept(cs, ptr, "SET", &success);
if (!success) {
- dict_foreign_free(foreign);
dict_foreign_report_syntax_err(name, start_of_latest_foreign,
ptr);
return(DB_CANNOT_ADD_CONSTRAINT);
@@ -4564,7 +4685,6 @@ scan_on_conditions:
ptr = dict_accept(cs, ptr, "NULL", &success);
if (!success) {
- dict_foreign_free(foreign);
dict_foreign_report_syntax_err(name, start_of_latest_foreign,
ptr);
return(DB_CANNOT_ADD_CONSTRAINT);
@@ -4577,8 +4697,6 @@ scan_on_conditions:
/* It is not sensible to define SET NULL
if the column is not allowed to be NULL! */
- dict_foreign_free(foreign);
-
mutex_enter(&dict_foreign_err_mutex);
dict_foreign_error_report_low(ef, name);
fprintf(ef, "%s:\n"
@@ -4604,8 +4722,6 @@ try_find_index:
if (n_on_deletes > 1 || n_on_updates > 1) {
/* It is an error to define more than 1 action */
- dict_foreign_free(foreign);
-
mutex_enter(&dict_foreign_err_mutex);
dict_foreign_error_report_low(ef, name);
fprintf(ef, "%s:\n"
@@ -4627,7 +4743,6 @@ try_find_index:
foreign->foreign_index,
TRUE, FALSE);
if (!index) {
- dict_foreign_free(foreign);
mutex_enter(&dict_foreign_err_mutex);
dict_foreign_error_report_low(ef, name);
fprintf(ef, "%s:\n"
@@ -4671,16 +4786,6 @@ try_find_index:
= mem_heap_strdup(foreign->heap, column_names[i]);
}
- /* We found an ok constraint definition: add to the lists */
-
- UT_LIST_ADD_LAST(foreign_list, table->foreign_list, foreign);
-
- if (referenced_table) {
- UT_LIST_ADD_LAST(referenced_list,
- referenced_table->referenced_list,
- foreign);
- }
-
goto loop;
}
/**************************************************************************
@@ -4766,7 +4871,6 @@ dict_foreign_parse_drop_constraints(
const char*** constraints_to_drop) /*!< out: id's of the
constraints to drop */
{
- dict_foreign_t* foreign;
ibool success;
char* str;
size_t len;
@@ -4843,25 +4947,10 @@ loop:
(*constraints_to_drop)[*n] = id;
(*n)++;
- /* Look for the given constraint id */
-
- foreign = UT_LIST_GET_FIRST(table->foreign_list);
-
- while (foreign != NULL) {
- if (0 == innobase_strcasecmp(foreign->id, id)
- || (strchr(foreign->id, '/')
- && 0 == innobase_strcasecmp(
- id,
- dict_remove_db_name(foreign->id)))) {
- /* Found */
- break;
- }
-
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
- }
-
-
- if (foreign == NULL) {
+ if (std::find_if(table->foreign_set.begin(),
+ table->foreign_set.end(),
+ dict_foreign_matches_id(id))
+ == table->foreign_set.end()) {
if (!srv_read_only_mode) {
FILE* ef = dict_foreign_err_file;
@@ -5188,7 +5277,6 @@ dict_table_print(
dict_table_t* table) /*!< in: table */
{
dict_index_t* index;
- dict_foreign_t* foreign;
ulint i;
ut_ad(mutex_own(&(dict_sys->mutex)));
@@ -5225,23 +5313,15 @@ dict_table_print(
index = UT_LIST_GET_NEXT(indexes, index);
}
- table->stat_initialized = FALSE;
-
dict_table_stats_unlock(table, RW_X_LATCH);
- foreign = UT_LIST_GET_FIRST(table->foreign_list);
-
- while (foreign != NULL) {
- dict_foreign_print_low(foreign);
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
- }
-
- foreign = UT_LIST_GET_FIRST(table->referenced_list);
+ std::for_each(table->foreign_set.begin(),
+ table->foreign_set.end(),
+ dict_foreign_print_low);
- while (foreign != NULL) {
- dict_foreign_print_low(foreign);
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
- }
+ std::for_each(table->referenced_set.begin(),
+ table->referenced_set.end(),
+ dict_foreign_print_low);
}
/**********************************************************************//**
@@ -5449,15 +5529,12 @@ dict_print_info_on_foreign_keys(
mutex_enter(&(dict_sys->mutex));
- foreign = UT_LIST_GET_FIRST(table->foreign_list);
-
- if (foreign == NULL) {
- mutex_exit(&(dict_sys->mutex));
+ for (dict_foreign_set::iterator it = table->foreign_set.begin();
+ it != table->foreign_set.end();
+ ++it) {
- return;
- }
+ foreign = *it;
- while (foreign != NULL) {
if (create_table_format) {
dict_print_info_on_foreign_key_in_create_format(
file, trx, foreign, TRUE);
@@ -5514,8 +5591,6 @@ dict_print_info_on_foreign_keys(
fputs(" ON UPDATE NO ACTION", file);
}
}
-
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
}
mutex_exit(&(dict_sys->mutex));
@@ -5886,10 +5961,11 @@ dict_foreign_replace_index(
ut_ad(index->to_be_dropped);
ut_ad(index->table == table);
- for (foreign = UT_LIST_GET_FIRST(table->foreign_list);
- foreign;
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) {
+ for (dict_foreign_set::iterator it = table->foreign_set.begin();
+ it != table->foreign_set.end();
+ ++it) {
+ foreign = *it;
if (foreign->foreign_index == index) {
ut_ad(foreign->foreign_table == index->table);
@@ -5909,10 +5985,11 @@ dict_foreign_replace_index(
}
}
- for (foreign = UT_LIST_GET_FIRST(table->referenced_list);
- foreign;
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
+ for (dict_foreign_set::iterator it = table->referenced_set.begin();
+ it != table->referenced_set.end();
+ ++it) {
+ foreign = *it;
if (foreign->referenced_index == index) {
ut_ad(foreign->referenced_table == index->table);
@@ -6068,14 +6145,34 @@ dict_table_schema_check(
table = dict_table_get_low(req_schema->table_name);
if (table == NULL) {
+ bool should_print = true;
/* no such table */
- ut_snprintf(errstr, errstr_sz,
- "Table %s not found.",
- ut_format_name(req_schema->table_name,
- TRUE, buf, sizeof(buf)));
+ if (innobase_strcasecmp(req_schema->table_name, "mysql/innodb_table_stats") == 0) {
+ if (innodb_table_stats_not_found_reported == false) {
+ innodb_table_stats_not_found = true;
+ innodb_table_stats_not_found_reported = true;
+ } else {
+ should_print = false;
+ }
+ } else if (innobase_strcasecmp(req_schema->table_name, "mysql/innodb_index_stats") == 0 ) {
+ if (innodb_index_stats_not_found_reported == false) {
+ innodb_index_stats_not_found = true;
+ innodb_index_stats_not_found_reported = true;
+ } else {
+ should_print = false;
+ }
+ }
- return(DB_TABLE_NOT_FOUND);
+ if (should_print) {
+ ut_snprintf(errstr, errstr_sz,
+ "Table %s not found.",
+ ut_format_name(req_schema->table_name,
+ TRUE, buf, sizeof(buf)));
+ return(DB_TABLE_NOT_FOUND);
+ } else {
+ return(DB_STATS_DO_NOT_EXIST);
+ }
}
if (table->ibd_file_missing) {
@@ -6204,24 +6301,24 @@ dict_table_schema_check(
}
}
- if (req_schema->n_foreign != UT_LIST_GET_LEN(table->foreign_list)) {
+ if (req_schema->n_foreign != table->foreign_set.size()) {
ut_snprintf(
errstr, errstr_sz,
- "Table %s has %lu foreign key(s) pointing to other "
- "tables, but it must have %lu.",
+ "Table %s has " ULINTPF " foreign key(s) pointing"
+ " to other tables, but it must have %lu.",
ut_format_name(req_schema->table_name,
TRUE, buf, sizeof(buf)),
- UT_LIST_GET_LEN(table->foreign_list),
+ static_cast<ulint>(table->foreign_set.size()),
req_schema->n_foreign);
return(DB_ERROR);
}
- if (req_schema->n_referenced != UT_LIST_GET_LEN(table->referenced_list)) {
+ if (req_schema->n_referenced != table->referenced_set.size()) {
ut_snprintf(
errstr, errstr_sz,
- "There are %lu foreign key(s) pointing to %s, "
+ "There are " ULINTPF " foreign key(s) pointing to %s, "
"but there must be %lu.",
- UT_LIST_GET_LEN(table->referenced_list),
+ static_cast<ulint>(table->referenced_set.size()),
ut_format_name(req_schema->table_name,
TRUE, buf, sizeof(buf)),
req_schema->n_referenced);
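
The lazy stats-latch allocation above is built on the new os_once class (added in include/os0once.h, which is not part of this excerpt): stats_latch_created starts out as os_once::NEVER_DONE, and dict_table_stats_lock() calls os_once::do_or_wait_for_done() so that exactly one thread allocates the latch while any concurrent caller waits until the state reads DONE. A rough, self-contained sketch of that pattern using std::atomic (the real class uses InnoDB's own atomics; the details below are assumptions):

    #include <atomic>
    #include <thread>

    class once_sketch {
    public:
            enum state_t { NEVER_DONE, IN_PROGRESS, DONE };

            /* Run init(arg) exactly once; late callers wait until it is done. */
            template<typename Arg>
            static void do_or_wait_for_done(std::atomic<state_t>* state,
                                            void (*init)(Arg*), Arg* arg)
            {
                    state_t expected = NEVER_DONE;

                    if (state->compare_exchange_strong(expected, IN_PROGRESS)) {
                            init(arg);              /* this thread won the race */
                            state->store(DONE);
                    } else {
                            while (state->load() != DONE) {
                                    std::this_thread::yield();
                            }
                    }
            }
    };

dict_table_stats_latch_create(table, false), used for the private table clones in dict0stats.cc, simply records DONE with a NULL latch, which is what keeps dict_table_stats_lock()/unlock() no-ops on those objects.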
diff --git a/storage/xtradb/dict/dict0mem.cc b/storage/xtradb/dict/dict0mem.cc
index 7ce42fa8efc..5e0ffab4bf7 100644
--- a/storage/xtradb/dict/dict0mem.cc
+++ b/storage/xtradb/dict/dict0mem.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
@@ -98,13 +98,9 @@ dict_mem_table_create(
ut_d(table->magic_n = DICT_TABLE_MAGIC_N);
- if (!nonshared) {
- table->stats_latch = new rw_lock_t;
- rw_lock_create(dict_table_stats_latch_key, table->stats_latch,
- SYNC_INDEX_TREE);
- } else {
- table->stats_latch = NULL;
- }
+ /* true means that the stats latch will be enabled -
+ dict_table_stats_lock() will not be a no-op. */
+ dict_table_stats_latch_create(table, true);
#ifndef UNIV_HOTBACKUP
@@ -141,6 +137,9 @@ dict_mem_table_create(
#endif /* !UNIV_HOTBACKUP */
+ new(&table->foreign_set) dict_foreign_set();
+ new(&table->referenced_set) dict_foreign_set();
+
return(table);
}
@@ -168,17 +167,16 @@ dict_mem_table_free(
}
}
#ifndef UNIV_HOTBACKUP
- if (table->stats_latch) {
+ if (table->autoinc_lock) {
mutex_free(&(table->autoinc_mutex));
}
#endif /* UNIV_HOTBACKUP */
- if (table->stats_latch) {
+ dict_table_stats_latch_destroy(table);
- rw_lock_free(table->stats_latch);
- delete table->stats_latch;
- }
+ table->foreign_set.~dict_foreign_set();
+ table->referenced_set.~dict_foreign_set();
ut_free(table->name);
mem_heap_free(table->heap);
@@ -350,10 +348,15 @@ dict_mem_table_col_rename_low(
table->col_names = col_names;
}
+ dict_foreign_t* foreign;
+
/* Replace the field names in every foreign key constraint. */
- for (dict_foreign_t* foreign = UT_LIST_GET_FIRST(table->foreign_list);
- foreign != NULL;
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) {
+ for (dict_foreign_set::iterator it = table->foreign_set.begin();
+ it != table->foreign_set.end();
+ ++it) {
+
+ foreign = *it;
+
for (unsigned f = 0; f < foreign->n_fields; f++) {
/* These can point straight to
table->col_names, because the foreign key
@@ -365,10 +368,12 @@ dict_mem_table_col_rename_low(
}
}
- for (dict_foreign_t* foreign = UT_LIST_GET_FIRST(
- table->referenced_list);
- foreign != NULL;
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
+ for (dict_foreign_set::iterator it = table->referenced_set.begin();
+ it != table->referenced_set.end();
+ ++it) {
+
+ foreign = *it;
+
for (unsigned f = 0; f < foreign->n_fields; f++) {
/* foreign->referenced_col_names[] need to be
copies, because the constraint may become
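
dict_table_t itself is carved out of a mem_heap, so the two new std::set members cannot rely on automatic construction or destruction; that is what the placement new calls in dict_mem_table_create() and the explicit ~dict_foreign_set() calls in dict_mem_table_free() above are for. A minimal self-contained illustration of the idiom (types here are placeholders, with malloc() standing in for the mem_heap):

    #include <cstdlib>
    #include <new>
    #include <set>

    typedef std::set<int> int_set;

    struct table_like {
            int_set foreign_set;    /* non-POD member inside a heap-carved struct */
    };

    int main()
    {
            /* Raw allocation: no constructors run automatically. */
            void* raw = std::malloc(sizeof(table_like));
            if (raw == NULL) {
                    return(1);
            }

            table_like* t = static_cast<table_like*>(raw);

            new(&t->foreign_set) int_set();         /* construct in place */
            t->foreign_set.insert(42);

            t->foreign_set.~int_set();              /* destroy by hand */
            std::free(raw);                         /* before the raw memory goes away */
            return(0);
    }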
diff --git a/storage/xtradb/dict/dict0stats.cc b/storage/xtradb/dict/dict0stats.cc
index bec0079942b..e0a2880e214 100644
--- a/storage/xtradb/dict/dict0stats.cc
+++ b/storage/xtradb/dict/dict0stats.cc
@@ -46,6 +46,7 @@ Created Jan 06, 2010 Vasil Dimov
#include "ut0rnd.h" /* ut_rnd_interval() */
#include "ut0ut.h" /* ut_format_name(), ut_time() */
+#include <algorithm>
#include <map>
#include <vector>
@@ -127,10 +128,11 @@ where n=1..n_uniq.
#endif /* UNIV_STATS_DEBUG */
/* Gets the number of leaf pages to sample in persistent stats estimation */
-#define N_SAMPLE_PAGES(index) \
- ((index)->table->stats_sample_pages != 0 ? \
- (index)->table->stats_sample_pages : \
- srv_stats_persistent_sample_pages)
+#define N_SAMPLE_PAGES(index) \
+ static_cast<ib_uint64_t>( \
+ (index)->table->stats_sample_pages != 0 \
+ ? (index)->table->stats_sample_pages \
+ : srv_stats_persistent_sample_pages)
/* number of distinct records on a given level that are required to stop
descending to lower levels and fetch N_SAMPLE_PAGES(index) records
@@ -268,10 +270,12 @@ dict_stats_persistent_storage_check(
mutex_exit(&(dict_sys->mutex));
}
- if (ret != DB_SUCCESS) {
+ if (ret != DB_SUCCESS && ret != DB_STATS_DO_NOT_EXIST) {
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: Error: %s\n", errstr);
return(false);
+ } else if (ret == DB_STATS_DO_NOT_EXIST) {
+ return(false);
}
/* else */
@@ -430,9 +434,9 @@ dict_stats_table_clone_create(
t->corrupted = table->corrupted;
/* This private object "t" is not shared with other threads, so
- we do not need the stats_latch. The lock/unlock routines will do
- nothing if stats_latch is NULL. */
- t->stats_latch = NULL;
+ we do not need the stats_latch (thus we pass false below). The
+ dict_table_stats_lock()/unlock() routines will do nothing. */
+ dict_table_stats_latch_create(t, false);
UT_LIST_INIT(t->indexes);
@@ -511,6 +515,7 @@ dict_stats_table_clone_free(
/*========================*/
dict_table_t* t) /*!< in: dummy table object to free */
{
+ dict_table_stats_latch_destroy(t);
mem_heap_free(t->heap);
}
@@ -1330,35 +1335,40 @@ enum page_scan_method_t {
};
/* @} */
-/*********************************************************************//**
-Scan a page, reading records from left to right and counting the number
-of distinct records on that page (looking only at the first n_prefix
-columns). If scan_method is QUIT_ON_FIRST_NON_BORING then the function
+/** Scan a page, reading records from left to right and counting the number
+of distinct records (looking only at the first n_prefix
+columns) and the number of external pages pointed to by records from this page.
+If scan_method is QUIT_ON_FIRST_NON_BORING then the function
will return as soon as it finds a record that does not match its neighbor
to the right, which means that in the case of QUIT_ON_FIRST_NON_BORING the
returned n_diff can either be 0 (empty page), 1 (the whole page has all keys
equal) or 2 (the function found a non-boring record and returned).
+@param[out] out_rec record, or NULL
+@param[out] offsets1 rec_get_offsets() working space (must
+be big enough)
+@param[out] offsets2 rec_get_offsets() working space (must
+be big enough)
+@param[in] index index of the page
+@param[in] page the page to scan
+@param[in] n_prefix look at the first n_prefix columns
+@param[in] scan_method scan to the end of the page or not
+@param[out] n_diff number of distinct records encountered
+@param[out] n_external_pages if this is non-NULL then it will be set
+to the number of externally stored pages which were encountered
@return offsets1 or offsets2 (the offsets of *out_rec),
or NULL if the page is empty and does not contain user records. */
-UNIV_INLINE __attribute__((nonnull))
+UNIV_INLINE
ulint*
dict_stats_scan_page(
-/*=================*/
- const rec_t** out_rec, /*!< out: record, or NULL */
- ulint* offsets1, /*!< out: rec_get_offsets()
- working space (must be big
- enough) */
- ulint* offsets2, /*!< out: rec_get_offsets()
- working space (must be big
- enough) */
- dict_index_t* index, /*!< in: index of the page */
- const page_t* page, /*!< in: the page to scan */
- ulint n_prefix, /*!< in: look at the first
- n_prefix columns */
- page_scan_method_t scan_method, /*!< in: scan to the end of
- the page or not */
- ib_uint64_t* n_diff) /*!< out: number of distinct
- records encountered */
+ const rec_t** out_rec,
+ ulint* offsets1,
+ ulint* offsets2,
+ dict_index_t* index,
+ const page_t* page,
+ ulint n_prefix,
+ page_scan_method_t scan_method,
+ ib_uint64_t* n_diff,
+ ib_uint64_t* n_external_pages)
{
ulint* offsets_rec = offsets1;
ulint* offsets_next_rec = offsets2;
@@ -1376,6 +1386,12 @@ dict_stats_scan_page(
get_next = page_rec_get_next_const;
}
+ const bool should_count_external_pages = n_external_pages != NULL;
+
+ if (should_count_external_pages) {
+ *n_external_pages = 0;
+ }
+
rec = get_next(page_get_infimum_rec(page));
if (page_rec_is_supremum(rec)) {
@@ -1388,6 +1404,11 @@ dict_stats_scan_page(
offsets_rec = rec_get_offsets(rec, index, offsets_rec,
ULINT_UNDEFINED, &heap);
+ if (should_count_external_pages) {
+ *n_external_pages += btr_rec_get_externally_stored_len(
+ rec, offsets_rec);
+ }
+
next_rec = get_next(rec);
*n_diff = 1;
@@ -1438,6 +1459,11 @@ dict_stats_scan_page(
offsets_next_rec = offsets_tmp;
}
+ if (should_count_external_pages) {
+ *n_external_pages += btr_rec_get_externally_stored_len(
+ rec, offsets_rec);
+ }
+
next_rec = get_next(next_rec);
}
@@ -1448,19 +1474,25 @@ func_exit:
return(offsets_rec);
}
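Before the next function, here is a simplified, standalone sketch of the counting behaviour described in the comment above: walk the records left to right, count groups that differ in the first n_prefix columns, and optionally accumulate the externally stored pages referenced by each record. It is not InnoDB code; record_t, scan_page_sketch and the enum values are invented, and delete-marked records are ignored for brevity.

#include <cstddef>
#include <cstdint>
#include <vector>

struct record_t {
	std::vector<int>	cols;		/* column values */
	uint64_t		ext_pages;	/* externally stored pages */
};

enum scan_mode_t { QUIT_ON_FIRST_NON_BORING_SKETCH, COUNT_ALL_SKETCH };

static void
scan_page_sketch(
	const std::vector<record_t>&	recs,
	size_t				n_prefix,
	scan_mode_t			mode,
	uint64_t*			n_diff,
	uint64_t*			n_external_pages)	/* may be NULL */
{
	if (n_external_pages != NULL) {
		*n_external_pages = 0;
	}

	*n_diff = recs.empty() ? 0 : 1;

	for (size_t i = 0; i < recs.size(); i++) {
		if (n_external_pages != NULL) {
			*n_external_pages += recs[i].ext_pages;
		}

		if (i == 0) {
			continue;
		}

		bool	equal = true;

		for (size_t c = 0; c < n_prefix && equal; c++) {
			equal = recs[i - 1].cols[c] == recs[i].cols[c];
		}

		if (!equal) {
			++*n_diff;

			if (mode == QUIT_ON_FIRST_NON_BORING_SKETCH) {
				/* n_diff == 2 here, just like the real
				function's early return. */
				return;
			}
		}
	}
}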
-/*********************************************************************//**
-Dive below the current position of a cursor and calculate the number of
+/** Dive below the current position of a cursor and calculate the number of
distinct records on the leaf page, when looking at the first n_prefix
-columns.
+columns. Also calculate the number of external pages pointed to by records
+on the leaf page.
+@param[in] cur cursor
+@param[in] n_prefix look at the first n_prefix columns
+when comparing records
+@param[out] n_diff number of distinct records
+@param[out] n_external_pages number of external pages
+@param[in,out] mtr mini-transaction
@return number of distinct records on the leaf page */
static
-ib_uint64_t
+void
dict_stats_analyze_index_below_cur(
-/*===============================*/
- const btr_cur_t*cur, /*!< in: cursor */
- ulint n_prefix, /*!< in: look at the first n_prefix
- columns when comparing records */
- mtr_t* mtr) /*!< in/out: mini-transaction */
+ const btr_cur_t* cur,
+ ulint n_prefix,
+ ib_uint64_t* n_diff,
+ ib_uint64_t* n_external_pages,
+ mtr_t* mtr)
{
dict_index_t* index;
ulint space;
@@ -1473,7 +1505,6 @@ dict_stats_analyze_index_below_cur(
ulint* offsets1;
ulint* offsets2;
ulint* offsets_rec;
- ib_uint64_t n_diff; /* the result */
ulint size;
index = btr_cur_get_index(cur);
@@ -1509,6 +1540,10 @@ dict_stats_analyze_index_below_cur(
page_no = btr_node_ptr_get_child_page_no(rec, offsets_rec);
+ /* assume no external pages by default - in case we quit from this
+ function without analyzing any leaf pages */
+ *n_external_pages = 0;
+
/* descend to the leaf level on the B-tree */
for (;;) {
@@ -1527,20 +1562,24 @@ dict_stats_analyze_index_below_cur(
/* search for the first non-boring record on the page */
offsets_rec = dict_stats_scan_page(
&rec, offsets1, offsets2, index, page, n_prefix,
- QUIT_ON_FIRST_NON_BORING, &n_diff);
+ QUIT_ON_FIRST_NON_BORING, n_diff, NULL);
/* pages on level > 0 are not allowed to be empty */
ut_a(offsets_rec != NULL);
/* if page is not empty (offsets_rec != NULL) then n_diff must
be > 0, otherwise there is a bug in dict_stats_scan_page() */
- ut_a(n_diff > 0);
+ ut_a(*n_diff > 0);
- if (n_diff == 1) {
+ if (*n_diff == 1) {
/* page has all keys equal and the end of the page
was reached by dict_stats_scan_page(), no need to
descend to the leaf level */
mem_heap_free(heap);
- return(1);
+ /* can't get an estimate for n_external_pages here
+ because we do not dive to the leaf level; assume no
+ external pages (*n_external_pages was set to 0
+ above). */
+ return;
}
/* else */
@@ -1548,7 +1587,7 @@ dict_stats_analyze_index_below_cur(
first non-boring record it finds, then the returned n_diff
can either be 0 (empty page), 1 (page has all keys equal) or
2 (non-boring record was found) */
- ut_a(n_diff == 2);
+ ut_a(*n_diff == 2);
/* we have a non-boring record in rec, descend below it */
@@ -1559,11 +1598,14 @@ dict_stats_analyze_index_below_cur(
ut_ad(btr_page_get_level(page, mtr) == 0);
/* scan the leaf page and find the number of distinct keys,
- when looking only at the first n_prefix columns */
+ when looking only at the first n_prefix columns; also estimate
+ the number of externally stored pages pointed to by records on this
+ page */
offsets_rec = dict_stats_scan_page(
&rec, offsets1, offsets2, index, page, n_prefix,
- COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED, &n_diff);
+ COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED, n_diff,
+ n_external_pages);
#if 0
DEBUG_PRINTF(" %s(): n_diff below page_no=%lu: " UINT64PF "\n",
@@ -1571,133 +1613,146 @@ dict_stats_analyze_index_below_cur(
#endif
mem_heap_free(heap);
-
- return(n_diff);
}
-/*********************************************************************//**
-For a given level in an index select N_SAMPLE_PAGES(index)
-(or less) records from that level and dive below them to the corresponding
-leaf pages, then scan those leaf pages and save the sampling results in
-index->stat_n_diff_key_vals[n_prefix - 1] and the number of pages scanned in
-index->stat_n_sample_sizes[n_prefix - 1]. */
+/** Input data that is used to calculate dict_index_t::stat_n_diff_key_vals[]
+for each n-column prefix (n from 1 to n_uniq). */
+struct n_diff_data_t {
+ /** Index of the level on which the descent through the btree
+ stopped. Level 0 is the leaf level. This is >= 1 because we
+ avoid scanning the leaf level: it may contain too many pages and
+ scanning it is pointless when combined with the random dives -
+ if we were to scan the leaf level anyway, that would amount to a
+ full scan, which we could simply do instead of picking random
+ records higher in the tree and diving below them. At the start
+ of the analysis we may decide to do a full scan of the leaf
+ level, but then this structure is not used in that code path. */
+ ulint level;
+
+ /** Number of records on the level where the descent through the btree
+ stopped. When we scan the btree from the root, we stop at some mid
+ level, choose some records from it and dive below them towards a leaf
+ page to analyze. */
+ ib_uint64_t n_recs_on_level;
+
+ /** Number of different key values that were found on the mid level. */
+ ib_uint64_t n_diff_on_level;
+
+ /** Number of leaf pages that are analyzed. This is also the same as
+ the number of records that we pick from the mid level and dive below
+ them. */
+ ib_uint64_t n_leaf_pages_to_analyze;
+
+ /** Cumulative sum of the number of different key values that were
+ found on all analyzed pages. */
+ ib_uint64_t n_diff_all_analyzed_pages;
+
+ /** Cumulative sum of the number of external pages (stored outside of
+ the btree but in the same file segment). */
+ ib_uint64_t n_external_pages_sum;
+};
+
+/** Estimate the number of different key values in an index when looking at
+the first n_prefix columns. For a given level in an index select
+n_diff_data->n_leaf_pages_to_analyze records from that level and dive below
+them to the corresponding leaf pages, then scan those leaf pages and save the
+sampling results in n_diff_data->n_diff_all_analyzed_pages.
+@param[in] index index
+@param[in] n_prefix look at first 'n_prefix' columns when
+comparing records
+@param[in] boundaries a vector that contains
+n_diff_data->n_diff_on_level integers each of which represents the index (on
+level 'level', counting from left/smallest to right/biggest from 0) of the
+last record from each group of distinct keys
+@param[in,out] n_diff_data n_diff_all_analyzed_pages and
+n_external_pages_sum in this structure will be set by this function. The
+members level, n_diff_on_level and n_leaf_pages_to_analyze must be set by the
+caller in advance - they are used by some calculations inside this function
+@param[in,out] mtr mini-transaction */
static
void
dict_stats_analyze_index_for_n_prefix(
-/*==================================*/
- dict_index_t* index, /*!< in/out: index */
- ulint level, /*!< in: level, must be >= 1 */
- ib_uint64_t total_recs_on_level,
- /*!< in: total number of
- records on the given level */
- ulint n_prefix, /*!< in: look at first
- n_prefix columns when
- comparing records */
- ib_uint64_t n_diff_for_this_prefix,
- /*!< in: number of distinct
- records on the given level,
- when looking at the first
- n_prefix columns */
- boundaries_t* boundaries, /*!< in: array that contains
- n_diff_for_this_prefix
- integers each of which
- represents the index (on the
- level, counting from
- left/smallest to right/biggest
- from 0) of the last record
- from each group of distinct
- keys */
- mtr_t* mtr) /*!< in/out: mini-transaction */
+ dict_index_t* index,
+ ulint n_prefix,
+ const boundaries_t* boundaries,
+ n_diff_data_t* n_diff_data,
+ mtr_t* mtr)
{
btr_pcur_t pcur;
const page_t* page;
ib_uint64_t rec_idx;
- ib_uint64_t last_idx_on_level;
- ib_uint64_t n_recs_to_dive_below;
- ib_uint64_t n_diff_sum_of_all_analyzed_pages;
ib_uint64_t i;
#if 0
DEBUG_PRINTF(" %s(table=%s, index=%s, level=%lu, n_prefix=%lu, "
- "n_diff_for_this_prefix=" UINT64PF ")\n",
+ "n_diff_on_level=" UINT64PF ")\n",
__func__, index->table->name, index->name, level,
- n_prefix, n_diff_for_this_prefix);
+ n_prefix, n_diff_data->n_diff_on_level);
#endif
ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index),
MTR_MEMO_S_LOCK));
- /* if some of those is 0 then this means that there is exactly one
- page in the B-tree and it is empty and we should have done full scan
- and should not be here */
- ut_ad(total_recs_on_level > 0);
- ut_ad(n_diff_for_this_prefix > 0);
-
- /* this must be at least 1 */
- ut_ad(N_SAMPLE_PAGES(index) > 0);
-
/* Position pcur on the leftmost record on the leftmost page
on the desired level. */
btr_pcur_open_at_index_side(
true, index, BTR_SEARCH_LEAF | BTR_ALREADY_S_LATCHED,
- &pcur, true, level, mtr);
+ &pcur, true, n_diff_data->level, mtr);
btr_pcur_move_to_next_on_page(&pcur);
page = btr_pcur_get_page(&pcur);
+ const rec_t* first_rec = btr_pcur_get_rec(&pcur);
+
+ /* We shouldn't be scanning the leaf level. The caller of this function
+ should have stopped the descent at level 1 or higher. */
+ ut_ad(n_diff_data->level > 0);
+ ut_ad(!page_is_leaf(page));
+
/* The page must not be empty, except when
it is the root page (and the whole index is empty). */
- ut_ad(btr_pcur_is_on_user_rec(&pcur) || page_is_leaf(page));
- ut_ad(btr_pcur_get_rec(&pcur)
- == page_rec_get_next_const(page_get_infimum_rec(page)));
+ ut_ad(btr_pcur_is_on_user_rec(&pcur));
+ ut_ad(first_rec == page_rec_get_next_const(page_get_infimum_rec(page)));
/* check that we are indeed on the desired level */
- ut_a(btr_page_get_level(page, mtr) == level);
+ ut_a(btr_page_get_level(page, mtr) == n_diff_data->level);
/* there should not be any pages on the left */
ut_a(btr_page_get_prev(page, mtr) == FIL_NULL);
/* check whether the first record on the leftmost page is marked
- as such, if we are on a non-leaf level */
- ut_a((level == 0)
- == !(REC_INFO_MIN_REC_FLAG & rec_get_info_bits(
- btr_pcur_get_rec(&pcur), page_is_comp(page))));
+ as such; we are on a non-leaf level */
+ ut_a(rec_get_info_bits(first_rec, page_is_comp(page))
+ & REC_INFO_MIN_REC_FLAG);
- last_idx_on_level = boundaries->at(
- static_cast<unsigned int>(n_diff_for_this_prefix - 1));
+ const ib_uint64_t last_idx_on_level = boundaries->at(
+ static_cast<unsigned>(n_diff_data->n_diff_on_level - 1));
rec_idx = 0;
- n_diff_sum_of_all_analyzed_pages = 0;
-
- n_recs_to_dive_below = ut_min(N_SAMPLE_PAGES(index),
- n_diff_for_this_prefix);
-
- for (i = 0; i < n_recs_to_dive_below; i++) {
- ib_uint64_t left;
- ib_uint64_t right;
- ib_uint64_t rnd;
- ib_uint64_t dive_below_idx;
+ n_diff_data->n_diff_all_analyzed_pages = 0;
+ n_diff_data->n_external_pages_sum = 0;
- /* there are n_diff_for_this_prefix elements
+ for (i = 0; i < n_diff_data->n_leaf_pages_to_analyze; i++) {
+ /* there are n_diff_on_level elements
in 'boundaries' and we divide those elements
- into n_recs_to_dive_below segments, for example:
+ into n_leaf_pages_to_analyze segments, for example:
- let n_diff_for_this_prefix=100, n_recs_to_dive_below=4, then:
+ let n_diff_on_level=100, n_leaf_pages_to_analyze=4, then:
segment i=0: [0, 24]
segment i=1: [25, 49]
segment i=2: [50, 74]
segment i=3: [75, 99] or
- let n_diff_for_this_prefix=1, n_recs_to_dive_below=1, then:
+ let n_diff_on_level=1, n_leaf_pages_to_analyze=1, then:
segment i=0: [0, 0] or
- let n_diff_for_this_prefix=2, n_recs_to_dive_below=2, then:
+ let n_diff_on_level=2, n_leaf_pages_to_analyze=2, then:
segment i=0: [0, 0]
segment i=1: [1, 1] or
- let n_diff_for_this_prefix=13, n_recs_to_dive_below=7, then:
+ let n_diff_on_level=13, n_leaf_pages_to_analyze=7, then:
segment i=0: [0, 0]
segment i=1: [1, 2]
segment i=2: [3, 4]
@@ -1708,9 +1763,12 @@ dict_stats_analyze_index_for_n_prefix(
then we select a random record from each segment and dive
below it */
- left = n_diff_for_this_prefix * i / n_recs_to_dive_below;
- right = n_diff_for_this_prefix * (i + 1)
- / n_recs_to_dive_below - 1;
+ const ib_uint64_t n_diff = n_diff_data->n_diff_on_level;
+ const ib_uint64_t n_pick
+ = n_diff_data->n_leaf_pages_to_analyze;
+
+ const ib_uint64_t left = n_diff * i / n_pick;
+ const ib_uint64_t right = n_diff * (i + 1) / n_pick - 1;
ut_a(left <= right);
ut_a(right <= last_idx_on_level);
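The boundary arithmetic above is easy to sanity-check in isolation. This throwaway program, with invented names, reproduces the segment layout from the worked example in the comment for n_diff_on_level=13 and n_leaf_pages_to_analyze=7:

#include <cstdint>
#include <cstdio>

int main()
{
	const uint64_t	n_diff = 13;	/* n_diff_on_level */
	const uint64_t	n_pick = 7;	/* n_leaf_pages_to_analyze */

	for (uint64_t i = 0; i < n_pick; i++) {
		const uint64_t	left = n_diff * i / n_pick;
		const uint64_t	right = n_diff * (i + 1) / n_pick - 1;

		/* prints [0, 0] [1, 2] [3, 4] [5, 6] [7, 8] [9, 10] [11, 12] */
		printf("segment i=%llu: [%llu, %llu]\n",
		       (unsigned long long) i,
		       (unsigned long long) left,
		       (unsigned long long) right);
	}

	return 0;
}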
@@ -1718,11 +1776,11 @@ dict_stats_analyze_index_for_n_prefix(
/* we do not pass (left, right) because we do not want to ask
ut_rnd_interval() to work with too big numbers since
ib_uint64_t could be bigger than ulint */
- rnd = static_cast<ib_uint64_t>(
- ut_rnd_interval(0, static_cast<ulint>(right - left)));
+ const ulint rnd = ut_rnd_interval(
+ 0, static_cast<ulint>(right - left));
- dive_below_idx = boundaries->at(
- static_cast<unsigned int>(left + rnd));
+ const ib_uint64_t dive_below_idx
+ = boundaries->at(static_cast<unsigned>(left + rnd));
#if 0
DEBUG_PRINTF(" %s(): dive below record with index="
@@ -1758,9 +1816,13 @@ dict_stats_analyze_index_for_n_prefix(
ut_a(rec_idx == dive_below_idx);
ib_uint64_t n_diff_on_leaf_page;
+ ib_uint64_t n_external_pages;
- n_diff_on_leaf_page = dict_stats_analyze_index_below_cur(
- btr_pcur_get_btr_cur(&pcur), n_prefix, mtr);
+ dict_stats_analyze_index_below_cur(btr_pcur_get_btr_cur(&pcur),
+ n_prefix,
+ &n_diff_on_leaf_page,
+ &n_external_pages,
+ mtr);
/* We adjust n_diff_on_leaf_page here to avoid counting
one record twice - once as the last on some page and once
@@ -1780,37 +1842,86 @@ dict_stats_analyze_index_for_n_prefix(
n_diff_on_leaf_page--;
}
- n_diff_sum_of_all_analyzed_pages += n_diff_on_leaf_page;
- }
-
- /* n_diff_sum_of_all_analyzed_pages can be 0 here if all the leaf
- pages sampled contained only delete-marked records. In this case
- we should assign 0 to index->stat_n_diff_key_vals[n_prefix - 1], which
- the formula below does. */
+ n_diff_data->n_diff_all_analyzed_pages += n_diff_on_leaf_page;
- /* See REF01 for an explanation of the algorithm */
- index->stat_n_diff_key_vals[n_prefix - 1]
- = index->stat_n_leaf_pages
-
- * n_diff_for_this_prefix
- / total_recs_on_level
-
- * n_diff_sum_of_all_analyzed_pages
- / n_recs_to_dive_below;
+ n_diff_data->n_external_pages_sum += n_external_pages;
+ }
- index->stat_n_sample_sizes[n_prefix - 1] = n_recs_to_dive_below;
+ btr_pcur_close(&pcur);
+}
- DEBUG_PRINTF(" %s(): n_diff=" UINT64PF " for n_prefix=%lu "
- "(%lu"
- " * " UINT64PF " / " UINT64PF
- " * " UINT64PF " / " UINT64PF ")\n",
- __func__, index->stat_n_diff_key_vals[n_prefix - 1],
- n_prefix,
- index->stat_n_leaf_pages,
- n_diff_for_this_prefix, total_recs_on_level,
- n_diff_sum_of_all_analyzed_pages, n_recs_to_dive_below);
+/** Set dict_index_t::stat_n_diff_key_vals[] and stat_n_sample_sizes[].
+@param[in] n_diff_data input data to use to derive the results
+@param[in,out] index index whose stat_n_diff_key_vals[] to set */
+UNIV_INLINE
+void
+dict_stats_index_set_n_diff(
+ const n_diff_data_t* n_diff_data,
+ dict_index_t* index)
+{
+ for (ulint n_prefix = dict_index_get_n_unique(index);
+ n_prefix >= 1;
+ n_prefix--) {
+ /* n_diff_all_analyzed_pages can be 0 here if
+ all the leaf pages sampled contained only
+ delete-marked records. In this case we should assign
+ 0 to index->stat_n_diff_key_vals[n_prefix - 1], which
+ the formula below does. */
+
+ const n_diff_data_t* data = &n_diff_data[n_prefix - 1];
+
+ ut_ad(data->n_leaf_pages_to_analyze > 0);
+ ut_ad(data->n_recs_on_level > 0);
+
+ ulint n_ordinary_leaf_pages;
+
+ if (data->level == 1) {
+ /* If we know the number of records on level 1, then
+ this number is the same as the number of pages on
+ level 0 (leaf). */
+ n_ordinary_leaf_pages = data->n_recs_on_level;
+ } else {
+ /* If we analyzed D ordinary leaf pages and found E
+ external pages in total linked from those D ordinary
+ leaf pages, then this means that the ratio
+ ordinary/external is D/E. Then the ratio ordinary/total
+ is D / (D + E). Knowing that the total number of pages
+ is T (including ordinary and external) then we estimate
+ that the total number of ordinary leaf pages is
+ T * D / (D + E). */
+ n_ordinary_leaf_pages
+ = index->stat_n_leaf_pages
+ * data->n_leaf_pages_to_analyze
+ / (data->n_leaf_pages_to_analyze
+ + data->n_external_pages_sum);
+ }
- btr_pcur_close(&pcur);
+ /* See REF01 for an explanation of the algorithm */
+ index->stat_n_diff_key_vals[n_prefix - 1]
+ = n_ordinary_leaf_pages
+
+ * data->n_diff_on_level
+ / data->n_recs_on_level
+
+ * data->n_diff_all_analyzed_pages
+ / data->n_leaf_pages_to_analyze;
+
+ index->stat_n_sample_sizes[n_prefix - 1]
+ = data->n_leaf_pages_to_analyze;
+
+ DEBUG_PRINTF(" %s(): n_diff=" UINT64PF " for n_prefix=%lu"
+ " (%lu"
+ " * " UINT64PF " / " UINT64PF
+ " * " UINT64PF " / " UINT64PF ")\n",
+ __func__,
+ index->stat_n_diff_key_vals[n_prefix - 1],
+ n_prefix,
+ index->stat_n_leaf_pages,
+ data->n_diff_on_level,
+ data->n_recs_on_level,
+ data->n_diff_all_analyzed_pages,
+ data->n_leaf_pages_to_analyze);
+ }
}
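To see the shape of the two formulas above with concrete numbers, here is a hedged, standalone recomputation. Every input value is invented for illustration and the variable names only echo the fields of n_diff_data_t:

#include <cstdint>
#include <cstdio>

int main()
{
	/* Invented sample inputs for one n-column prefix. */
	const uint64_t	stat_n_leaf_pages         = 10000;	/* T, from the index */
	const uint64_t	n_leaf_pages_to_analyze   = 20;		/* D, leaf pages dived into */
	const uint64_t	n_external_pages_sum      = 5;		/* E, BLOB pages seen */
	const uint64_t	n_diff_on_level           = 500;	/* distinct keys on the level */
	const uint64_t	n_recs_on_level           = 2000;	/* records on the level */
	const uint64_t	n_diff_all_analyzed_pages = 1200;	/* distinct keys on sampled leaves */
	const uint64_t	level                     = 3;		/* level > 1, so estimate the leaves */

	/* Estimate the number of ordinary (non-BLOB) leaf pages: T * D / (D + E). */
	const uint64_t	n_ordinary_leaf_pages = (level == 1)
		? n_recs_on_level
		: stat_n_leaf_pages * n_leaf_pages_to_analyze
			/ (n_leaf_pages_to_analyze + n_external_pages_sum);

	/* Same shape as the REF01 formula in the hunk above. */
	const uint64_t	n_diff_estimate
		= n_ordinary_leaf_pages
		* n_diff_on_level / n_recs_on_level
		* n_diff_all_analyzed_pages / n_leaf_pages_to_analyze;

	/* 10000*20/25 = 8000; 8000*500/2000 = 2000; 2000*1200/20 = 120000 */
	printf("estimated n_diff = %llu\n",
	       (unsigned long long) n_diff_estimate);

	return 0;
}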
/*********************************************************************//**
@@ -1828,10 +1939,8 @@ dict_stats_analyze_index(
bool level_is_analyzed;
ulint n_uniq;
ulint n_prefix;
- ib_uint64_t* n_diff_on_level;
ib_uint64_t total_recs;
ib_uint64_t total_pages;
- boundaries_t* n_diff_boundaries;
mtr_t mtr;
ulint size;
DBUG_ENTER("dict_stats_analyze_index");
@@ -1917,11 +2026,18 @@ dict_stats_analyze_index(
DBUG_VOID_RETURN;
}
- /* set to zero */
- n_diff_on_level = reinterpret_cast<ib_uint64_t*>
- (mem_zalloc(n_uniq * sizeof(ib_uint64_t)));
+ /* For each level that is being scanned in the btree, this contains the
+ number of different key values for all possible n-column prefixes. */
+ ib_uint64_t* n_diff_on_level = new ib_uint64_t[n_uniq];
- n_diff_boundaries = new boundaries_t[n_uniq];
+ /* For each level that is being scanned in the btree, this contains the
+ index of the last record from each group of equal records (when
+ comparing only the first n columns, n=1..n_uniq). */
+ boundaries_t* n_diff_boundaries = new boundaries_t[n_uniq];
+
+ /* For each n-column prefix this array contains the input data that is
+ used to calculate dict_index_t::stat_n_diff_key_vals[]. */
+ n_diff_data_t* n_diff_data = new n_diff_data_t[n_uniq];
/* total_recs is also used to estimate the number of pages on one
level below, so at the start we have 1 page (the root) */
@@ -2033,12 +2149,12 @@ dict_stats_analyze_index(
level_is_analyzed = true;
- if (n_diff_on_level[n_prefix - 1]
- >= N_DIFF_REQUIRED(index)
- || level == 1) {
- /* we found a good level with many distinct
- records or we have reached the last level we
- could scan */
+ if (level == 1
+ || n_diff_on_level[n_prefix - 1]
+ >= N_DIFF_REQUIRED(index)) {
+ /* we have reached the last level we could scan
+ or we found a good level with many distinct
+ records */
break;
}
@@ -2051,7 +2167,6 @@ found_level:
" distinct records for n_prefix=%lu\n",
__func__, level, n_diff_on_level[n_prefix - 1],
n_prefix);
-
/* here we are either on level 1 or the level that we are on
contains >= N_DIFF_REQUIRED distinct keys or we did not scan
deeper levels because they would contain too many pages */
@@ -2060,20 +2175,47 @@ found_level:
ut_ad(level_is_analyzed);
+ /* if any of these is 0 then there is exactly one page in the
+ B-tree and it is empty and we should have done a full scan and
+ should not be here */
+ ut_ad(total_recs > 0);
+ ut_ad(n_diff_on_level[n_prefix - 1] > 0);
+
+ ut_ad(N_SAMPLE_PAGES(index) > 0);
+
+ n_diff_data_t* data = &n_diff_data[n_prefix - 1];
+
+ data->level = level;
+
+ data->n_recs_on_level = total_recs;
+
+ data->n_diff_on_level = n_diff_on_level[n_prefix - 1];
+
+ data->n_leaf_pages_to_analyze = std::min(
+ N_SAMPLE_PAGES(index),
+ n_diff_on_level[n_prefix - 1]);
+
/* pick some records from this level and dive below them for
the given n_prefix */
dict_stats_analyze_index_for_n_prefix(
- index, level, total_recs, n_prefix,
- n_diff_on_level[n_prefix - 1],
- &n_diff_boundaries[n_prefix - 1], &mtr);
+ index, n_prefix, &n_diff_boundaries[n_prefix - 1],
+ data, &mtr);
}
mtr_commit(&mtr);
delete[] n_diff_boundaries;
- mem_free(n_diff_on_level);
+ delete[] n_diff_on_level;
+
+ /* n_prefix == 0 means that the above loop did not end prematurely
+ due to the tree being changed, and so n_diff_data[] is set up. */
+ if (n_prefix == 0) {
+ dict_stats_index_set_n_diff(n_diff_data, index);
+ }
+
+ delete[] n_diff_data;
dict_stats_assert_initialized_index(index);
DBUG_VOID_RETURN;
@@ -2248,17 +2390,21 @@ dict_stats_save_index_stat(
"END;", trx);
if (ret != DB_SUCCESS) {
- char buf_table[MAX_FULL_NAME_LEN];
- char buf_index[MAX_FULL_NAME_LEN];
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Cannot save index statistics for table "
- "%s, index %s, stat name \"%s\": %s\n",
- ut_format_name(index->table->name, TRUE,
- buf_table, sizeof(buf_table)),
- ut_format_name(index->name, FALSE,
- buf_index, sizeof(buf_index)),
- stat_name, ut_strerr(ret));
+ if (innodb_index_stats_not_found == false &&
+ index->stats_error_printed == false) {
+ char buf_table[MAX_FULL_NAME_LEN];
+ char buf_index[MAX_FULL_NAME_LEN];
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+ " InnoDB: Cannot save index statistics for table "
+ "%s, index %s, stat name \"%s\": %s\n",
+ ut_format_name(index->table->name, TRUE,
+ buf_table, sizeof(buf_table)),
+ ut_format_name(index->name, FALSE,
+ buf_index, sizeof(buf_index)),
+ stat_name, ut_strerr(ret));
+ index->stats_error_printed = true;
+ }
}
return(ret);
@@ -2973,20 +3119,24 @@ dict_stats_update_for_index(
}
/* else */
- /* Fall back to transient stats since the persistent
- storage is not present or is corrupted */
- char buf_table[MAX_FULL_NAME_LEN];
- char buf_index[MAX_FULL_NAME_LEN];
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Recalculation of persistent statistics "
- "requested for table %s index %s but the required "
- "persistent statistics storage is not present or is "
- "corrupted. Using transient stats instead.\n",
- ut_format_name(index->table->name, TRUE,
- buf_table, sizeof(buf_table)),
- ut_format_name(index->name, FALSE,
- buf_index, sizeof(buf_index)));
+ if (innodb_index_stats_not_found == false &&
+ index->stats_error_printed == false) {
+ /* Fall back to transient stats since the persistent
+ storage is not present or is corrupted */
+ char buf_table[MAX_FULL_NAME_LEN];
+ char buf_index[MAX_FULL_NAME_LEN];
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+ " InnoDB: Recalculation of persistent statistics "
+ "requested for table %s index %s but the required "
+ "persistent statistics storage is not present or is "
+ "corrupted. Using transient stats instead.\n",
+ ut_format_name(index->table->name, TRUE,
+ buf_table, sizeof(buf_table)),
+ ut_format_name(index->name, FALSE,
+ buf_index, sizeof(buf_index)));
+ index->stats_error_printed = true;
+ }
}
dict_table_stats_lock(index->table, RW_X_LATCH);
@@ -3071,13 +3221,17 @@ dict_stats_update(
/* Fall back to transient stats since the persistent
storage is not present or is corrupted */
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Recalculation of persistent statistics "
- "requested for table %s but the required persistent "
- "statistics storage is not present or is corrupted. "
- "Using transient stats instead.\n",
- ut_format_name(table->name, TRUE, buf, sizeof(buf)));
+ if (innodb_table_stats_not_found == false &&
+ table->stats_error_printed == false) {
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+ " InnoDB: Recalculation of persistent statistics "
+ "requested for table %s but the required persistent "
+ "statistics storage is not present or is corrupted. "
+ "Using transient stats instead.\n",
+ ut_format_name(table->name, TRUE, buf, sizeof(buf)));
+ table->stats_error_printed = true;
+ }
goto transient;
@@ -3121,17 +3275,21 @@ dict_stats_update(
/* persistent statistics storage does not exist
or is corrupted, calculate the transient stats */
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: Fetch of persistent "
- "statistics requested for table %s but the "
- "required system tables %s and %s are not "
- "present or have unexpected structure. "
- "Using transient stats instead.\n",
- ut_format_name(table->name, TRUE,
- buf, sizeof(buf)),
- TABLE_STATS_NAME_PRINT,
- INDEX_STATS_NAME_PRINT);
+ if (innodb_table_stats_not_found == false &&
+ table->stats_error_printed == false) {
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+ " InnoDB: Error: Fetch of persistent "
+ "statistics requested for table %s but the "
+ "required system tables %s and %s are not "
+ "present or have unexpected structure. "
+ "Using transient stats instead.\n",
+ ut_format_name(table->name, TRUE,
+ buf, sizeof(buf)),
+ TABLE_STATS_NAME_PRINT,
+ INDEX_STATS_NAME_PRINT);
+ table->stats_error_printed = true;
+ }
goto transient;
}
@@ -3202,16 +3360,19 @@ dict_stats_update(
dict_stats_table_clone_free(t);
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error fetching persistent statistics "
- "for table %s from %s and %s: %s. "
- "Using transient stats method instead.\n",
- ut_format_name(table->name, TRUE, buf,
- sizeof(buf)),
- TABLE_STATS_NAME,
- INDEX_STATS_NAME,
- ut_strerr(err));
+ if (innodb_table_stats_not_found == false &&
+ table->stats_error_printed == false) {
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+ " InnoDB: Error fetching persistent statistics "
+ "for table %s from %s and %s: %s. "
+ "Using transient stats method instead.\n",
+ ut_format_name(table->name, TRUE, buf,
+ sizeof(buf)),
+ TABLE_STATS_NAME,
+ INDEX_STATS_NAME,
+ ut_strerr(err));
+ }
goto transient;
}
diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc
index 017e96e6111..8e7758a8a07 100644
--- a/storage/xtradb/fil/fil0fil.cc
+++ b/storage/xtradb/fil/fil0fil.cc
@@ -121,7 +121,7 @@ completes, we decrement the count and return the file node to the LRU-list if
the count drops to zero. */
/** When mysqld is run, the default directory "." is the mysqld datadir,
-but in the MySQL Embedded Server Library and ibbackup it is not the default
+but in the MySQL Embedded Server Library and mysqlbackup it is not the default
directory, and we must set the base file path explicitly */
UNIV_INTERN const char* fil_path_to_mysql_datadir = ".";
@@ -793,7 +793,7 @@ fil_node_open_file(
fprintf(stderr,
"InnoDB: Error: the size of single-table"
" tablespace file %s\n"
- "InnoDB: is only "UINT64PF","
+ "InnoDB: is only " UINT64PF ","
" should be at least %lu!\n",
node->name,
size_bytes,
@@ -1866,6 +1866,9 @@ fil_close_all_files(void)
{
fil_space_t* space;
+ if (srv_track_changed_pages && srv_redo_log_thread_started)
+ os_event_wait(srv_redo_log_tracked_event);
+
mutex_enter(&fil_system->mutex);
space = UT_LIST_GET_FIRST(fil_system->space_list);
@@ -1902,6 +1905,9 @@ fil_close_log_files(
{
fil_space_t* space;
+ if (srv_track_changed_pages && srv_redo_log_thread_started)
+ os_event_wait(srv_redo_log_tracked_event);
+
mutex_enter(&fil_system->mutex);
space = UT_LIST_GET_FIRST(fil_system->space_list);
@@ -2109,8 +2115,8 @@ fil_check_first_page(
}
/*******************************************************************//**
-Reads the flushed lsn, arch no, and tablespace flag fields from a data
-file at database startup.
+Reads the flushed lsn, arch no, space_id and tablespace flag fields from
+the first page of a data file at database startup.
@retval NULL on success, or if innodb_force_recovery is set
@return pointer to an error message string */
UNIV_INTERN
@@ -2146,7 +2152,13 @@ fil_read_first_page(
fil_space_is_page_compressed(orig_space_id) :
FALSE);
- *flags = fsp_header_get_flags(page);
+ /* The FSP_HEADER on page 0 is only valid for the first file
+ in a tablespace. So if this is not the first datafile, leave
+ *flags and *space_id as they were read from the first file and
+ do not validate the first page. */
+ if (!one_read_already) {
+ *flags = fsp_header_get_flags(page);
+ }
/* If the page is page compressed, we need to decompress it before
continuing. */
@@ -2155,14 +2167,14 @@ fil_read_first_page(
fil_decompress_page(NULL, page, UNIV_PAGE_SIZE, &write_size);
}
- *space_id = fsp_header_get_space_id(page);
-
- flushed_lsn = mach_read_from_8(page + FIL_PAGE_FILE_FLUSH_LSN);
-
if (!one_read_already) {
+ *space_id = fsp_header_get_space_id(page);
+
check_msg = fil_check_first_page(page);
}
+ flushed_lsn = mach_read_from_8(page + FIL_PAGE_FILE_FLUSH_LSN);
+
ut_free(buf);
if (check_msg) {
@@ -2359,13 +2371,13 @@ exists and the space id in it matches. Replays the create operation if a file
at that path does not exist yet. If the database directory for the file to be
created does not exist, then we create the directory, too.
-Note that ibbackup --apply-log sets fil_path_to_mysql_datadir to point to the
-datadir that we should use in replaying the file operations.
+Note that mysqlbackup --apply-log sets fil_path_to_mysql_datadir to point to
+the datadir that we should use in replaying the file operations.
InnoDB recovery does not replay these fully since it always sets the space id
-to zero. But ibbackup does replay them. TODO: If remote tablespaces are used,
-ibbackup will only create tables in the default directory since MLOG_FILE_CREATE
-and MLOG_FILE_CREATE2 only know the tablename, not the path.
+to zero. But mysqlbackup does replay them. TODO: If remote tablespaces are
+used, mysqlbackup will only create tables in the default directory since
+MLOG_FILE_CREATE and MLOG_FILE_CREATE2 only know the tablename, not the path.
@return end of log record, or NULL if the record was not completely
contained between ptr and end_ptr */
@@ -2457,11 +2469,11 @@ fil_op_log_parse_or_replay(
}
/* Let us try to perform the file operation, if sensible. Note that
- ibbackup has at this stage already read in all space id info to the
+ mysqlbackup has at this stage already read in all space id info to the
fil0fil.cc data structures.
NOTE that our algorithm is not guaranteed to work correctly if there
- were renames of tables during the backup. See ibbackup code for more
+ were renames of tables during the backup. See mysqlbackup code for more
on the problem. */
switch (type) {
@@ -2876,12 +2888,12 @@ fil_delete_tablespace(
if (err == DB_SUCCESS) {
#ifndef UNIV_HOTBACKUP
/* Write a log record about the deletion of the .ibd
- file, so that ibbackup can replay it in the
+ file, so that mysqlbackup can replay it in the
--apply-log phase. We use a dummy mtr and the familiar
log write mechanism. */
mtr_t mtr;
- /* When replaying the operation in ibbackup, do not try
+ /* When replaying the operation in mysqlbackup, do not try
to write any log record */
mtr_start(&mtr);
@@ -4563,7 +4575,7 @@ will_not_choose:
" (< 4 pages 16 kB each),\n"
"InnoDB: or the space id in the file header"
" is not sensible.\n"
- "InnoDB: This can happen in an ibbackup run,"
+ "InnoDB: This can happen in a mysqlbackup run,"
" and is not dangerous.\n",
fsp->filepath, fsp->id, fsp->filepath, size);
os_file_close(fsp->file);
@@ -4600,7 +4612,7 @@ will_not_choose:
"InnoDB: because space %s with the same id\n"
"InnoDB: was scanned earlier. This can happen"
" if you have renamed tables\n"
- "InnoDB: during an ibbackup run.\n",
+ "InnoDB: during a mysqlbackup run.\n",
fsp->filepath, fsp->id, fsp->filepath,
space->name);
os_file_close(fsp->file);
@@ -5332,9 +5344,9 @@ file_extended:
#ifdef UNIV_HOTBACKUP
/********************************************************************//**
Extends all tablespaces to the size stored in the space header. During the
-ibbackup --apply-log phase we extended the spaces on-demand so that log records
-could be applied, but that may have left spaces still too small compared to
-the size stored in the space header. */
+mysqlbackup --apply-log phase we extended the spaces on-demand so that log
+records could be applied, but that may have left spaces still too small
+compared to the size stored in the space header. */
UNIV_INTERN
void
fil_extend_tablespaces_to_stored_len(void)
@@ -5653,7 +5665,7 @@ _fil_io(
ulint mode;
fil_space_t* space;
fil_node_t* node;
- ibool ret;
+ ibool ret = TRUE;
ulint is_log;
ulint wake_later;
os_offset_t offset;
@@ -5878,7 +5890,7 @@ _fil_io(
page_compressed, page_compression_level, write_size);
#else
- /* In ibbackup do normal i/o, not aio */
+ /* In mysqlbackup do normal i/o, not aio */
if (type == OS_FILE_READ) {
ret = os_file_read(node->handle, buf, offset, len);
} else {
@@ -5887,7 +5899,6 @@ _fil_io(
offset, len);
}
#endif /* !UNIV_HOTBACKUP */
- ut_a(ret);
if (mode == OS_AIO_SYNC) {
/* The i/o operation is already completed when we return from
@@ -5902,7 +5913,11 @@ _fil_io(
ut_ad(fil_validate_skip());
}
- return(DB_SUCCESS);
+ if (!ret) {
+ return(DB_OUT_OF_FILE_SPACE);
+ } else {
+ return(DB_SUCCESS);
+ }
}
#ifndef UNIV_HOTBACKUP
diff --git a/storage/xtradb/fts/fts0ast.cc b/storage/xtradb/fts/fts0ast.cc
index d6c19c0050a..dd48ffee14d 100644
--- a/storage/xtradb/fts/fts0ast.cc
+++ b/storage/xtradb/fts/fts0ast.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -83,11 +83,11 @@ UNIV_INTERN
fts_ast_node_t*
fts_ast_create_node_term(
/*=====================*/
- void* arg, /*!< in: ast state instance */
- const char* ptr) /*!< in: ast term string */
+ void* arg, /*!< in: ast state instance */
+ const fts_ast_string_t* ptr) /*!< in: ast term string */
{
fts_ast_state_t* state = static_cast<fts_ast_state_t*>(arg);
- ulint len = strlen(ptr);
+ ulint len = ptr->len;
ulint cur_pos = 0;
fts_ast_node_t* node = NULL;
fts_ast_node_t* node_list = NULL;
@@ -101,8 +101,9 @@ fts_ast_create_node_term(
cur_len = innobase_mysql_fts_get_token(
state->charset,
- reinterpret_cast<const byte*>(ptr) + cur_pos,
- reinterpret_cast<const byte*>(ptr) + len, &str, &offset);
+ reinterpret_cast<const byte*>(ptr->str) + cur_pos,
+ reinterpret_cast<const byte*>(ptr->str) + len,
+ &str, &offset);
if (cur_len == 0) {
break;
@@ -124,10 +125,8 @@ fts_ast_create_node_term(
node->type = FTS_AST_TERM;
- node->term.ptr = static_cast<byte*>(ut_malloc(
- str.f_len + 1));
- memcpy(node->term.ptr, str.f_str, str.f_len);
- node->term.ptr[str.f_len] = '\0';
+ node->term.ptr = fts_ast_string_create(
+ str.f_str, str.f_len);
fts_ast_state_add_node(
static_cast<fts_ast_state_t*>(arg), node);
@@ -160,25 +159,21 @@ UNIV_INTERN
fts_ast_node_t*
fts_ast_create_node_text(
/*=====================*/
- void* arg, /*!< in: ast state instance */
- const char* ptr) /*!< in: ast text string */
+ void* arg, /*!< in: ast state instance */
+ const fts_ast_string_t* ptr) /*!< in: ast text string */
{
- ulint len = strlen(ptr);
+ ulint len = ptr->len;
fts_ast_node_t* node = NULL;
+ /* Once we come here, the string must have at least 2 quotes ""
+ around the query string, which could be empty. Also, the query
+ string may contain 0x00 bytes, so we do not treat it as NUL-terminated. */
+ ut_ad(len >= 2);
+ ut_ad(ptr->str[0] == '\"' && ptr->str[len - 1] == '\"');
- ut_ad(len >= 1);
-
- if (len <= 2) {
- /* There is a way to directly supply null terminator
- in the query string (by using 0x220022) and get here,
- and certainly it would not make a valid query text */
- ut_ad(ptr[0] == '\"');
-
- if (len == 2) {
- ut_ad(ptr[1] == '\"');
- }
-
+ if (len == 2) {
+ /* If the query string contains nothing except quotes,
+ it's obviously an invalid query. */
return(NULL);
}
@@ -188,11 +183,9 @@ fts_ast_create_node_text(
len -= 2;
node->type = FTS_AST_TEXT;
- node->text.ptr = static_cast<byte*>(ut_malloc(len + 1));
-
/*!< Skip copying the first quote */
- memcpy(node->text.ptr, ptr + 1, len);
- node->text.ptr[len] = 0;
+ node->text.ptr = fts_ast_string_create(
+ reinterpret_cast<const byte*>(ptr->str + 1), len);
node->text.distance = ULINT_UNDEFINED;
fts_ast_state_add_node((fts_ast_state_t*) arg, node);
@@ -275,14 +268,14 @@ fts_ast_free_node(
switch (node->type) {
case FTS_AST_TEXT:
if (node->text.ptr) {
- ut_free(node->text.ptr);
+ fts_ast_string_free(node->text.ptr);
node->text.ptr = NULL;
}
break;
case FTS_AST_TERM:
if (node->term.ptr) {
- ut_free(node->term.ptr);
+ fts_ast_string_free(node->term.ptr);
node->term.ptr = NULL;
}
break;
@@ -421,10 +414,10 @@ fts_ast_state_free(
fts_ast_node_t* next = node->next_alloc;
if (node->type == FTS_AST_TEXT && node->text.ptr) {
- ut_free(node->text.ptr);
+ fts_ast_string_free(node->text.ptr);
node->text.ptr = NULL;
} else if (node->type == FTS_AST_TERM && node->term.ptr) {
- ut_free(node->term.ptr);
+ fts_ast_string_free(node->term.ptr);
node->term.ptr = NULL;
}
@@ -445,11 +438,13 @@ fts_ast_node_print(
{
switch (node->type) {
case FTS_AST_TEXT:
- printf("TEXT: %s\n", node->text.ptr);
+ printf("TEXT: ");
+ fts_ast_string_print(node->text.ptr);
break;
case FTS_AST_TERM:
- printf("TERM: %s\n", node->term.ptr);
+ printf("TERM: ");
+ fts_ast_string_print(node->term.ptr);
break;
case FTS_AST_LIST:
@@ -628,3 +623,74 @@ fts_ast_visit(
return(error);
}
+
+/**
+Create an ast string object with a NUL terminator, so the string buffer
+has one more byte than len
+@param[in] str pointer to string
+@param[in] len length of the string
+@return ast string with NUL-terminator */
+UNIV_INTERN
+fts_ast_string_t*
+fts_ast_string_create(
+ const byte* str,
+ ulint len)
+{
+ fts_ast_string_t* ast_str;
+
+ ut_ad(len > 0);
+
+ ast_str = static_cast<fts_ast_string_t*>
+ (ut_malloc(sizeof(fts_ast_string_t)));
+ ast_str->str = static_cast<byte*>(ut_malloc(len + 1));
+
+ ast_str->len = len;
+ memcpy(ast_str->str, str, len);
+ ast_str->str[len] = '\0';
+
+ return(ast_str);
+}
+
+/**
+Free an ast string instance
+@param[in,out] ast_str string to free */
+UNIV_INTERN
+void
+fts_ast_string_free(
+ fts_ast_string_t* ast_str)
+{
+ if (ast_str != NULL) {
+ ut_free(ast_str->str);
+ ut_free(ast_str);
+ }
+}
+
+/**
+Translate ast string of type FTS_AST_NUMB to unsigned long by strtoul
+@param[in] ast_str string to translate
+@param[in] base the base
+@return translated number */
+UNIV_INTERN
+ulint
+fts_ast_string_to_ul(
+ const fts_ast_string_t* ast_str,
+ int base)
+{
+ return(strtoul(reinterpret_cast<const char*>(ast_str->str),
+ NULL, base));
+}
+
+/**
+Print the ast string
+@param[in] ast_str string to print
+UNIV_INTERN
+void
+fts_ast_string_print(
+ const fts_ast_string_t* ast_str)
+{
+ for (ulint i = 0; i < ast_str->len; ++i) {
+ printf("%c", ast_str->str[i]);
+ }
+
+ printf("\n");
+}
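A small usage sketch of the fts_ast_string_t helpers defined above; it assumes the surrounding InnoDB environment (byte, ulint, ut_a) and is not part of the patch:

static void
fts_ast_string_example(void)
{
	const byte		num[] = "42";

	/* The bytes are copied and a NUL is appended, so embedded 0x00
	bytes in real query text are preserved via len rather than via
	strlen(). */
	fts_ast_string_t*	s = fts_ast_string_create(num, 2);

	ut_a(s->len == 2);

	/* "42" parsed in base 10, as done for FTS_NUMB distance values. */
	ut_a(fts_ast_string_to_ul(s, 10) == 42);

	fts_ast_string_print(s);	/* prints the bytes and a newline */

	fts_ast_string_free(s);
}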
diff --git a/storage/xtradb/fts/fts0blex.cc b/storage/xtradb/fts/fts0blex.cc
index f83523825d2..7d0acb00a3b 100644
--- a/storage/xtradb/fts/fts0blex.cc
+++ b/storage/xtradb/fts/fts0blex.cc
@@ -451,7 +451,7 @@ static yyconst flex_int16_t yy_chk[32] =
#line 1 "fts0blex.l"
/*****************************************************************************
-Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -806,7 +806,7 @@ case 3:
YY_RULE_SETUP
#line 53 "fts0blex.l"
{
- val->token = strdup(fts0bget_text(yyscanner));
+ val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner));
return(FTS_NUMB);
}
@@ -815,7 +815,7 @@ case 4:
YY_RULE_SETUP
#line 59 "fts0blex.l"
{
- val->token = strdup(fts0bget_text(yyscanner));
+ val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner));
return(FTS_TERM);
}
@@ -824,7 +824,7 @@ case 5:
YY_RULE_SETUP
#line 65 "fts0blex.l"
{
- val->token = strdup(fts0bget_text(yyscanner));
+ val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner));
return(FTS_TEXT);
}
diff --git a/storage/xtradb/fts/fts0blex.l b/storage/xtradb/fts/fts0blex.l
index 6193f0df187..ae6e8ffaa48 100644
--- a/storage/xtradb/fts/fts0blex.l
+++ b/storage/xtradb/fts/fts0blex.l
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -51,19 +51,19 @@ this program; if not, write to the Free Software Foundation, Inc.,
}
[0-9]+ {
- val->token = strdup(fts0bget_text(yyscanner));
+ val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner));
return(FTS_NUMB);
}
[^" \n*()+\-<>~@%]* {
- val->token = strdup(fts0bget_text(yyscanner));
+ val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner));
return(FTS_TERM);
}
\"[^\"\n]*\" {
- val->token = strdup(fts0bget_text(yyscanner));
+ val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner));
return(FTS_TEXT);
}
diff --git a/storage/xtradb/fts/fts0fts.cc b/storage/xtradb/fts/fts0fts.cc
index 795f08da966..8884e944dfd 100644
--- a/storage/xtradb/fts/fts0fts.cc
+++ b/storage/xtradb/fts/fts0fts.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2011, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2011, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -44,6 +44,13 @@ Full Text Search interface
/** Column name from the FTS config table */
#define FTS_MAX_CACHE_SIZE_IN_MB "cache_size_in_mb"
+/** Verify if an aux table name is an obsolete table
+by looking up the key words in the obsolete table names */
+#define FTS_IS_OBSOLETE_AUX_TABLE(table_name) \
+ (strstr((table_name), "DOC_ID") != NULL \
+ || strstr((table_name), "ADDED") != NULL \
+ || strstr((table_name), "STOPWORDS") != NULL)
+
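A quick illustration of what the substring test above matches; the auxiliary table names below are made up but follow the FTS aux-table naming style:

static void
fts_obsolete_aux_table_example(void)
{
	ut_a(FTS_IS_OBSOLETE_AUX_TABLE("test/FTS_0000000000000123_ADDED"));
	ut_a(FTS_IS_OBSOLETE_AUX_TABLE("test/FTS_0000000000000123_STOPWORDS"));
	ut_a(FTS_IS_OBSOLETE_AUX_TABLE("test/FTS_0000000000000123_DOC_ID"));
	ut_a(!FTS_IS_OBSOLETE_AUX_TABLE("test/FTS_0000000000000123_CONFIG"));
	ut_a(!FTS_IS_OBSOLETE_AUX_TABLE("test/FTS_0000000000000123_DELETED_CACHE"));
}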
/** This is the maximum FTS cache size for each table and would be
a configurable variable */
UNIV_INTERN ulong fts_max_cache_size;
@@ -601,8 +608,10 @@ fts_cache_init(
cache->total_size = 0;
+ mutex_enter((ib_mutex_t*) &cache->deleted_lock);
cache->deleted_doc_ids = ib_vector_create(
cache->sync_heap, sizeof(fts_update_t), 4);
+ mutex_exit((ib_mutex_t*) &cache->deleted_lock);
/* Reset the cache data for all the FTS indexes. */
for (i = 0; i < ib_vector_size(cache->indexes); ++i) {
@@ -1130,7 +1139,10 @@ fts_cache_clear(
cache->sync_heap->arg = NULL;
cache->total_size = 0;
+
+ mutex_enter((ib_mutex_t*) &cache->deleted_lock);
cache->deleted_doc_ids = NULL;
+ mutex_exit((ib_mutex_t*) &cache->deleted_lock);
}
/*********************************************************************//**
@@ -1947,10 +1959,15 @@ fts_create_one_index_table(
char* table_name = fts_get_table_name(fts_table);
dberr_t error;
CHARSET_INFO* charset;
+ ulint flags2 = 0;
ut_ad(index->type & DICT_FTS);
- new_table = dict_mem_table_create(table_name, 0, 5, 1, 0, false);
+ if (srv_file_per_table) {
+ flags2 = DICT_TF2_USE_TABLESPACE;
+ }
+
+ new_table = dict_mem_table_create(table_name, 0, 5, 1, flags2, false);
field = dict_index_get_nth_field(index, 0);
charset = innobase_get_fts_charset(
@@ -1979,7 +1996,7 @@ fts_create_one_index_table(
dict_mem_table_add_col(new_table, heap, "ilist", DATA_BLOB,
4130048, 0);
- error = row_create_table_for_mysql(new_table, trx, true);
+ error = row_create_table_for_mysql(new_table, trx, false);
if (error != DB_SUCCESS) {
trx->error_state = error;
@@ -2244,11 +2261,15 @@ static
fts_trx_t*
fts_trx_create(
/*===========*/
- trx_t* trx) /*!< in: InnoDB transaction */
+ trx_t* trx) /*!< in/out: InnoDB
+ transaction */
{
- fts_trx_t* ftt;
- ib_alloc_t* heap_alloc;
- mem_heap_t* heap = mem_heap_create(1024);
+ fts_trx_t* ftt;
+ ib_alloc_t* heap_alloc;
+ mem_heap_t* heap = mem_heap_create(1024);
+ trx_named_savept_t* savep;
+
+ ut_a(trx->fts_trx == NULL);
ftt = static_cast<fts_trx_t*>(mem_heap_alloc(heap, sizeof(fts_trx_t)));
ftt->trx = trx;
@@ -2266,6 +2287,14 @@ fts_trx_create(
fts_savepoint_create(ftt->savepoints, NULL, NULL);
fts_savepoint_create(ftt->last_stmt, NULL, NULL);
+ /* Copy savepoints that were already set before. */
+ for (savep = UT_LIST_GET_FIRST(trx->trx_savepoints);
+ savep != NULL;
+ savep = UT_LIST_GET_NEXT(trx_savepoints, savep)) {
+
+ fts_savepoint_take(trx, ftt, savep->name);
+ }
+
return(ftt);
}
@@ -4359,6 +4388,7 @@ fts_sync_commit(
/* We need to do this within the deleted lock since fts_delete() can
attempt to add a deleted doc id to the cache deleted id array. */
fts_cache_clear(cache);
+ DEBUG_SYNC_C("fts_deleted_doc_ids_clear");
fts_cache_init(cache);
rw_lock_x_unlock(&cache->lock);
@@ -5160,6 +5190,12 @@ fts_cache_append_deleted_doc_ids(
mutex_enter((ib_mutex_t*) &cache->deleted_lock);
+ if (cache->deleted_doc_ids == NULL) {
+ mutex_exit((ib_mutex_t*) &cache->deleted_lock);
+ return;
+ }
+
+
for (i = 0; i < ib_vector_size(cache->deleted_doc_ids); ++i) {
fts_update_t* update;
@@ -5445,16 +5481,15 @@ void
fts_savepoint_take(
/*===============*/
trx_t* trx, /*!< in: transaction */
+ fts_trx_t* fts_trx, /*!< in: fts transaction */
const char* name) /*!< in: savepoint name */
{
mem_heap_t* heap;
- fts_trx_t* fts_trx;
fts_savepoint_t* savepoint;
fts_savepoint_t* last_savepoint;
ut_a(name != NULL);
- fts_trx = trx->fts_trx;
heap = fts_trx->heap;
/* The implied savepoint must exist. */
@@ -5771,7 +5806,7 @@ fts_savepoint_rollback(
ut_a(ib_vector_size(savepoints) > 0);
/* Restore the savepoint. */
- fts_savepoint_take(trx, name);
+ fts_savepoint_take(trx, trx->fts_trx, name);
}
}
@@ -5837,6 +5872,12 @@ fts_is_aux_table_name(
}
}
+ /* Could be obsolete common tables. */
+ if (strncmp(ptr, "ADDED", len) == 0
+ || strncmp(ptr, "STOPWORDS", len) == 0) {
+ return(true);
+ }
+
/* Try and read the index id. */
if (!fts_read_object_id(&table->index_id, ptr)) {
return(FALSE);
@@ -6433,6 +6474,56 @@ fts_check_and_drop_orphaned_tables(
mem_free(path);
}
+ } else {
+ if (FTS_IS_OBSOLETE_AUX_TABLE(aux_table->name)) {
+
+ /* The current table could be one of the three
+ obsolete tables; in this case we should
+ always try to drop it rather than rename it.
+ This can happen when we upgrade from an older
+ server to a later one, which no longer
+ contains these obsolete tables. */
+ drop = true;
+
+ dberr_t err;
+ trx_t* trx_drop =
+ trx_allocate_for_background();
+
+ trx_drop->op_info = "Drop obsolete aux tables";
+ trx_drop->dict_operation_lock_mode = RW_X_LATCH;
+
+ trx_start_for_ddl(trx_drop, TRX_DICT_OP_TABLE);
+
+ err = row_drop_table_for_mysql(
+ aux_table->name, trx_drop, false, true);
+
+ trx_drop->dict_operation_lock_mode = 0;
+
+ if (err != DB_SUCCESS) {
+ /* We don't need to worry about the
+ failure, since the server will try to
+ drop it again on the next restart, even
+ if the table is broken. */
+
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Failed to drop obsolete aux"
+ " table '%s', which is"
+ " harmless. Will try to drop"
+ " it on next restart.",
+ aux_table->name);
+
+ fts_sql_rollback(trx_drop);
+ } else {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Dropped obsolete aux"
+ " table '%s'.",
+ aux_table->name);
+
+ fts_sql_commit(trx_drop);
+ }
+
+ trx_free_for_background(trx_drop);
+ }
}
#ifdef _WIN32
if (!drop && rename) {
diff --git a/storage/xtradb/fts/fts0opt.cc b/storage/xtradb/fts/fts0opt.cc
index a9f3a25530d..910a00cd521 100644
--- a/storage/xtradb/fts/fts0opt.cc
+++ b/storage/xtradb/fts/fts0opt.cc
@@ -95,7 +95,7 @@ enum fts_msg_type_t {
/** Compressed list of words that have been read from FTS INDEX
that needs to be optimized. */
struct fts_zip_t {
- ulint status; /*!< Status of (un)/zip operation */
+ lint status; /*!< Status of (un)/zip operation */
ulint n_words; /*!< Number of words compressed */
diff --git a/storage/xtradb/fts/fts0pars.cc b/storage/xtradb/fts/fts0pars.cc
index 83d465b0988..7f0ba4e0c1b 100644
--- a/storage/xtradb/fts/fts0pars.cc
+++ b/storage/xtradb/fts/fts0pars.cc
@@ -100,6 +100,8 @@ extern int ftserror(const char* p);
#define YYPARSE_PARAM state
#define YYLEX_PARAM ((fts_ast_state_t*) state)->lexer
+#define YYTOKENFREE(token) fts_ast_string_free((token))
+
typedef int (*fts_scanner_alt)(YYSTYPE* val, yyscan_t yyscanner);
typedef int (*fts_scanner)();
@@ -154,9 +156,9 @@ typedef union YYSTYPE
/* Line 293 of yacc.c */
#line 61 "fts0pars.y"
- int oper;
- char* token;
- fts_ast_node_t* node;
+ int oper;
+ fts_ast_string_t* token;
+ fts_ast_node_t* node;
@@ -632,6 +634,19 @@ while (YYID (0))
#define YYTERROR 1
#define YYERRCODE 256
+#define YYERRCLEANUP \
+do \
+ switch (yylastchar) \
+ { \
+ case FTS_NUMB: \
+ case FTS_TEXT: \
+ case FTS_TERM: \
+ YYTOKENFREE(yylval.token); \
+ break; \
+ default: \
+ break; \
+ } \
+while (YYID (0))
/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
If N is 0, then set CURRENT to the empty location which ends
@@ -1169,6 +1184,8 @@ yyparse ()
{
/* The lookahead symbol. */
int yychar;
+/* The backup of yychar when there is an error and we're in yyerrlab. */
+int yylastchar;
/* The semantic value of the lookahead symbol. */
YYSTYPE yylval;
@@ -1524,8 +1541,8 @@ yyreduce:
/* Line 1806 of yacc.c */
#line 141 "fts0pars.y"
{
- fts_ast_term_set_distance((yyvsp[(1) - (3)].node), strtoul((yyvsp[(3) - (3)].token), NULL, 10));
- free((yyvsp[(3) - (3)].token));
+ fts_ast_term_set_distance((yyvsp[(1) - (3)].node), fts_ast_string_to_ul((yyvsp[(3) - (3)].token), 10));
+ fts_ast_string_free((yyvsp[(3) - (3)].token));
}
break;
@@ -1557,8 +1574,8 @@ yyreduce:
{
(yyval.node) = fts_ast_create_node_list(state, (yyvsp[(1) - (4)].node));
fts_ast_add_node((yyval.node), (yyvsp[(2) - (4)].node));
- fts_ast_term_set_distance((yyvsp[(2) - (4)].node), strtoul((yyvsp[(4) - (4)].token), NULL, 10));
- free((yyvsp[(4) - (4)].token));
+ fts_ast_term_set_distance((yyvsp[(2) - (4)].node), fts_ast_string_to_ul((yyvsp[(4) - (4)].token), 10));
+ fts_ast_string_free((yyvsp[(4) - (4)].token));
}
break;
@@ -1623,7 +1640,7 @@ yyreduce:
#line 191 "fts0pars.y"
{
(yyval.node) = fts_ast_create_node_term(state, (yyvsp[(1) - (1)].token));
- free((yyvsp[(1) - (1)].token));
+ fts_ast_string_free((yyvsp[(1) - (1)].token));
}
break;
@@ -1633,7 +1650,7 @@ yyreduce:
#line 196 "fts0pars.y"
{
(yyval.node) = fts_ast_create_node_term(state, (yyvsp[(1) - (1)].token));
- free((yyvsp[(1) - (1)].token));
+ fts_ast_string_free((yyvsp[(1) - (1)].token));
}
break;
@@ -1652,7 +1669,7 @@ yyreduce:
#line 207 "fts0pars.y"
{
(yyval.node) = fts_ast_create_node_text(state, (yyvsp[(1) - (1)].token));
- free((yyvsp[(1) - (1)].token));
+ fts_ast_string_free((yyvsp[(1) - (1)].token));
}
break;
@@ -1700,6 +1717,8 @@ yyreduce:
| yyerrlab -- here on detecting error |
`------------------------------------*/
yyerrlab:
+ /* Back up yychar, in case we change it. */
+ yylastchar = yychar;
/* Make sure we have latest lookahead translation. See comments at
user semantic actions for why this is necessary. */
yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
@@ -1755,7 +1774,11 @@ yyerrlab:
{
/* Return failure if at end of input. */
if (yychar == YYEOF)
- YYABORT;
+ {
+ /* Since we don't need the token, we have to free it first. */
+ YYERRCLEANUP;
+ YYABORT;
+ }
}
else
{
@@ -1812,7 +1835,11 @@ yyerrlab1:
/* Pop the current state because it cannot handle the error token. */
if (yyssp == yyss)
- YYABORT;
+ {
+ /* Since we don't need the error token, we have to free it first. */
+ YYERRCLEANUP;
+ YYABORT;
+ }
yydestruct ("Error: popping",
diff --git a/storage/xtradb/fts/fts0pars.y b/storage/xtradb/fts/fts0pars.y
index ff22e9a9873..e48036e82fe 100644
--- a/storage/xtradb/fts/fts0pars.y
+++ b/storage/xtradb/fts/fts0pars.y
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -59,9 +59,9 @@ struct fts_lexer_struct {
%}
%union {
- int oper;
- char* token;
- fts_ast_node_t* node;
+ int oper;
+ fts_ast_string_t* token;
+ fts_ast_node_t* node;
};
/* Enable re-entrant parser */
@@ -139,8 +139,8 @@ expr : term {
}
| text '@' FTS_NUMB {
- fts_ast_term_set_distance($1, strtoul($3, NULL, 10));
- free($3);
+ fts_ast_term_set_distance($1, fts_ast_string_to_ul($3, 10));
+ fts_ast_string_free($3);
}
| prefix term '*' {
@@ -157,8 +157,8 @@ expr : term {
| prefix text '@' FTS_NUMB {
$$ = fts_ast_create_node_list(state, $1);
fts_ast_add_node($$, $2);
- fts_ast_term_set_distance($2, strtoul($4, NULL, 10));
- free($4);
+ fts_ast_term_set_distance($2, fts_ast_string_to_ul($4, 10));
+ fts_ast_string_free($4);
}
| prefix text {
@@ -190,12 +190,12 @@ prefix : '-' {
term : FTS_TERM {
$$ = fts_ast_create_node_term(state, $1);
- free($1);
+ fts_ast_string_free($1);
}
| FTS_NUMB {
$$ = fts_ast_create_node_term(state, $1);
- free($1);
+ fts_ast_string_free($1);
}
/* Ignore leading '*' */
@@ -206,7 +206,7 @@ term : FTS_TERM {
text : FTS_TEXT {
$$ = fts_ast_create_node_text(state, $1);
- free($1);
+ fts_ast_string_free($1);
}
;
%%
diff --git a/storage/xtradb/fts/fts0que.cc b/storage/xtradb/fts/fts0que.cc
index c5c5f954789..beeb31abb9e 100644
--- a/storage/xtradb/fts/fts0que.cc
+++ b/storage/xtradb/fts/fts0que.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -2800,20 +2800,19 @@ fts_query_get_token(
ulint str_len;
byte* new_ptr = NULL;
- str_len = ut_strlen((char*) node->term.ptr);
+ str_len = node->term.ptr->len;
ut_a(node->type == FTS_AST_TERM);
token->f_len = str_len;
- token->f_str = node->term.ptr;
+ token->f_str = node->term.ptr->str;
if (node->term.wildcard) {
token->f_str = static_cast<byte*>(ut_malloc(str_len + 2));
token->f_len = str_len + 1;
- /* Need to copy the NUL character too. */
- memcpy(token->f_str, node->term.ptr, str_len + 1);
+ memcpy(token->f_str, node->term.ptr->str, str_len);
token->f_str[str_len] = '%';
token->f_str[token->f_len] = 0;
@@ -2848,8 +2847,8 @@ fts_query_visitor(
switch (node->type) {
case FTS_AST_TEXT:
- token.f_str = node->text.ptr;
- token.f_len = ut_strlen((char*) token.f_str);
+ token.f_str = node->text.ptr->str;
+ token.f_len = node->text.ptr->len;
if (query->oper == FTS_EXIST) {
ut_ad(query->intersection == NULL);
@@ -2878,8 +2877,8 @@ fts_query_visitor(
break;
case FTS_AST_TERM:
- token.f_str = node->term.ptr;
- token.f_len = ut_strlen(reinterpret_cast<char*>(token.f_str));
+ token.f_str = node->term.ptr->str;
+ token.f_len = node->term.ptr->len;
/* Add the word to our RB tree that will be used to
calculate this terms per document frequency. */
@@ -3191,13 +3190,9 @@ fts_query_read_node(
to assign the frequency on search string behalf. */
if (query->cur_node->type == FTS_AST_TERM
&& query->cur_node->term.wildcard) {
-
- /* These cast are safe since we only care about the
- terminating NUL character as an end of string marker. */
- term.f_len = ut_strlen(reinterpret_cast<char*>
- (query->cur_node->term.ptr));
+ term.f_len = query->cur_node->term.ptr->len;
ut_ad(FTS_MAX_WORD_LEN >= term.f_len);
- memcpy(term.f_str, query->cur_node->term.ptr, term.f_len);
+ memcpy(term.f_str, query->cur_node->term.ptr->str, term.f_len);
} else {
term.f_len = word->f_len;
ut_ad(FTS_MAX_WORD_LEN >= word->f_len);
@@ -3507,14 +3502,15 @@ fts_query_prepare_result(
doc_freq = rbt_value(fts_doc_freq_t, node);
/* Don't put deleted docs into result */
- if (fts_bsearch(array, 0, static_cast<int>(size), doc_freq->doc_id)
- >= 0) {
+ if (fts_bsearch(array, 0, static_cast<int>(size),
+ doc_freq->doc_id) >= 0) {
+ /* one less matching doc count */
+ --word_freq->doc_count;
continue;
}
ranking.doc_id = doc_freq->doc_id;
- ranking.rank = static_cast<fts_rank_t>(
- doc_freq->freq * word_freq->idf * word_freq->idf);
+ ranking.rank = static_cast<fts_rank_t>(doc_freq->freq);
ranking.words = NULL;
fts_query_add_ranking(query, result->rankings_by_id,
@@ -3527,6 +3523,25 @@ fts_query_prepare_result(
}
}
+ /* Calculate IDF only after we exclude the deleted items */
+ fts_query_calculate_idf(query);
+
+ node = rbt_first(query->word_freqs);
+ word_freq = rbt_value(fts_word_freq_t, node);
+
+ /* Calculate the ranking for each doc */
+ for (node = rbt_first(result->rankings_by_id);
+ node != NULL;
+ node = rbt_next(result->rankings_by_id, node)) {
+
+ fts_ranking_t* ranking;
+
+ ranking = rbt_value(fts_ranking_t, node);
+
+ ranking->rank = static_cast<fts_rank_t>(
+ ranking->rank * word_freq->idf * word_freq->idf);
+ }
+
return(result);
}
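The reordered ranking computation above stores the raw term frequency first, drops deleted documents from the per-word doc count, computes IDF only afterwards, and finally scales each stored rank by idf squared. The following standalone sketch only illustrates that order of operations; the container names and the idf formula (log10 of total documents over matching documents) are assumptions for illustration, not the InnoDB implementation.

#include <cmath>
#include <cstdio>
#include <map>

int main()
{
	double total_docs = 1000.0;		/* documents in the index */
	std::map<int, double> rank_by_doc;	/* doc id -> raw term frequency */

	rank_by_doc[1] = 3.0;			/* term occurs 3 times in doc 1 */
	rank_by_doc[7] = 1.0;
	double doc_count = 2.0;			/* matching docs left after the
						deleted ones were skipped */

	/* IDF is computed only after deleted docs were excluded, mirroring
	the reordering in fts_query_prepare_result() above. */
	double idf = std::log10(total_docs / doc_count);

	for (std::map<int, double>::iterator it = rank_by_doc.begin();
	     it != rank_by_doc.end(); ++it) {
		it->second = it->second * idf * idf;	/* rank = freq * idf^2 */
		std::printf("doc %d rank %f\n", it->first, it->second);
	}

	return(0);
}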
@@ -3898,6 +3913,7 @@ fts_query(
/* Get the deleted doc ids that are in the cache. */
fts_cache_append_deleted_doc_ids(
index->table->fts->cache, query.deleted->doc_ids);
+ DEBUG_SYNC_C("fts_deleted_doc_ids_append");
/* Sort the vector so that we can do a binary search over the ids. */
ib_vector_sort(query.deleted->doc_ids, fts_update_doc_id_cmp);
@@ -3954,7 +3970,8 @@ fts_query(
}
/* Calculate the inverse document frequency of the terms. */
- if (query.error == DB_SUCCESS) {
+ if (query.error == DB_SUCCESS
+ && query.flags != FTS_OPT_RANKING) {
fts_query_calculate_idf(&query);
}
diff --git a/storage/xtradb/fts/fts0tlex.cc b/storage/xtradb/fts/fts0tlex.cc
index ef17ab1acf2..b744fbf0763 100644
--- a/storage/xtradb/fts/fts0tlex.cc
+++ b/storage/xtradb/fts/fts0tlex.cc
@@ -447,7 +447,7 @@ static yyconst flex_int16_t yy_chk[29] =
#line 1 "fts0tlex.l"
/*****************************************************************************
-Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -802,7 +802,7 @@ case 3:
YY_RULE_SETUP
#line 54 "fts0tlex.l"
{
- val->token = strdup(fts0tget_text(yyscanner));
+ val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0tget_text(yyscanner)), fts0tget_leng(yyscanner));
return(FTS_TEXT);
}
@@ -811,7 +811,7 @@ case 4:
YY_RULE_SETUP
#line 60 "fts0tlex.l"
{
- val->token = strdup(fts0tget_text(yyscanner));
+ val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0tget_text(yyscanner)), fts0tget_leng(yyscanner));
return(FTS_TERM);
}
diff --git a/storage/xtradb/fts/fts0tlex.l b/storage/xtradb/fts/fts0tlex.l
index a18c2a55081..4f55a83afe5 100644
--- a/storage/xtradb/fts/fts0tlex.l
+++ b/storage/xtradb/fts/fts0tlex.l
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -52,13 +52,13 @@ this program; if not, write to the Free Software Foundation, Inc.,
}
\"[^\"\n]*\" {
- val->token = strdup(fts0tget_text(yyscanner));
+ val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0tget_text(yyscanner)), fts0tget_leng(yyscanner));
return(FTS_TEXT);
}
[^" \n\%]* {
- val->token = strdup(fts0tget_text(yyscanner));
+ val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0tget_text(yyscanner)), fts0tget_leng(yyscanner));
return(FTS_TERM);
}
diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc
index 1355c8a22a1..e4cfad45838 100644
--- a/storage/xtradb/handler/ha_innodb.cc
+++ b/storage/xtradb/handler/ha_innodb.cc
@@ -99,6 +99,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include "fts0types.h"
#include "row0import.h"
#include "row0quiesce.h"
+#include "row0mysql.h"
#ifdef UNIV_DEBUG
#include "trx0purge.h"
#endif /* UNIV_DEBUG */
@@ -488,7 +489,7 @@ static PSI_rwlock_info all_innodb_rwlocks[] = {
{&trx_purge_latch_key, "trx_purge_latch", 0},
{&index_tree_rw_lock_key, "index_tree_rw_lock", 0},
{&index_online_log_key, "index_online_log", 0},
- {&dict_table_stats_latch_key, "dict_table_stats", 0},
+ {&dict_table_stats_key, "dict_table_stats", 0},
{&hash_table_rw_lock_key, "hash_table_locks", 0}
};
# endif /* UNIV_PFS_RWLOCK */
@@ -1298,6 +1299,22 @@ innobase_start_trx_and_assign_read_view(
THD* thd); /* in: MySQL thread handle of the
user for whom the transaction should
be committed */
+/*****************************************************************//**
+Creates an InnoDB transaction struct for the thd if it does not yet have one.
+Starts a new InnoDB transaction if a transaction is not yet started, and
+clones the snapshot for a consistent read from another session, if it has one.
+@return 0 */
+static
+int
+innobase_start_trx_and_clone_read_view(
+/*====================================*/
+ handlerton* hton, /* in: Innodb handlerton */
+ THD* thd, /* in: MySQL thread handle of the
+ user for whom the transaction should
+ be committed */
+ THD* from_thd); /* in: MySQL thread handle of the
+ user session from which the consistent
+ read should be cloned */
/****************************************************************//**
Flushes InnoDB logs to disk and makes a checkpoint. Really, a commit flushes
the logs, and the name of this function should be innobase_checkpoint.
@@ -4034,6 +4051,14 @@ innobase_end(
if (innodb_inited) {
+ THD *thd= current_thd;
+ if (thd) { // may be UNINSTALL PLUGIN statement
+ trx_t* trx = thd_to_trx(thd);
+ if (trx) {
+ trx_free_for_mysql(trx);
+ }
+ }
+
srv_fast_shutdown = (ulint) innobase_fast_shutdown;
innodb_inited = 0;
@@ -4224,7 +4249,7 @@ innobase_commit_ordered_2(
{
DBUG_ENTER("innobase_commit_ordered_2");
- /* We need current binlog position for ibbackup to work. */
+ /* We need current binlog position for mysqlbackup to work. */
retry:
if (innobase_commit_concurrency > 0) {
mysql_mutex_lock(&commit_cond_m);
@@ -4327,6 +4352,102 @@ innobase_commit_ordered(
}
/*****************************************************************//**
+Creates an InnoDB transaction struct for the thd if it does not yet have one.
+Starts a new InnoDB transaction if a transaction is not yet started, and
+clones the snapshot for a consistent read from another session, if it has one.
+@return 0 */
+static
+int
+innobase_start_trx_and_clone_read_view(
+/*====================================*/
+ handlerton* hton, /* in: Innodb handlerton */
+ THD* thd, /* in: MySQL thread handle of the
+ user for whom the transaction should
+ be committed */
+ THD* from_thd) /* in: MySQL thread handle of the
+ user session from which the consistent
+ read should be cloned */
+{
+ trx_t* trx;
+ trx_t* from_trx;
+
+ DBUG_ENTER("innobase_start_trx_and_clone_read_view");
+ DBUG_ASSERT(hton == innodb_hton_ptr);
+
+ /* Get transaction handle from the donor session */
+
+ from_trx = thd_to_trx(from_thd);
+
+ if (!from_trx) {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ HA_ERR_UNSUPPORTED,
+ "InnoDB: WITH CONSISTENT SNAPSHOT "
+ "FROM SESSION was ignored because the "
+ "specified session does not have an open "
+ "transaction inside InnoDB.");
+
+ DBUG_RETURN(0);
+ }
+
+ /* Create a new trx struct for thd, if it does not yet have one */
+
+ trx = check_trx_exists(thd);
+
+ /* This is just to play safe: release a possible FIFO ticket and
+ search latch. Since we can potentially reserve the trx_sys->mutex,
+ we have to release the search system latch first to obey the latching
+ order. */
+
+ trx_search_latch_release_if_reserved(trx);
+
+ innobase_srv_conc_force_exit_innodb(trx);
+
+ /* If the transaction is not started yet, start it */
+
+ trx_start_if_not_started_xa(trx);
+
+ /* Clone the read view from the donor transaction. Do this only if
+ transaction is using REPEATABLE READ isolation level. */
+ trx->isolation_level = innobase_map_isolation_level(
+ thd_get_trx_isolation(thd));
+
+ if (trx->isolation_level != TRX_ISO_REPEATABLE_READ) {
+
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ HA_ERR_UNSUPPORTED,
+ "InnoDB: WITH CONSISTENT SNAPSHOT "
+ "was ignored because this phrase "
+ "can only be used with "
+ "REPEATABLE READ isolation level.");
+ } else {
+
+ lock_mutex_enter();
+ mutex_enter(&trx_sys->mutex);
+ trx_mutex_enter(from_trx);
+
+ if (!trx_clone_read_view(trx, from_trx)) {
+
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ HA_ERR_UNSUPPORTED,
+ "InnoDB: WITH CONSISTENT SNAPSHOT "
+ "FROM SESSION was ignored because "
+ "the target transaction has not been "
+ "assigned a read view.");
+ }
+
+ trx_mutex_exit(from_trx);
+ mutex_exit(&trx_sys->mutex);
+ lock_mutex_exit();
+ }
+
+ /* Set the MySQL flag to mark that there is an active transaction */
+
+ innobase_register_trx(hton, current_thd, trx);
+
+ DBUG_RETURN(0);
+}
+
+/*****************************************************************//**
Commits a transaction in an InnoDB database or marks an SQL statement
ended.
@return 0 */
@@ -4760,6 +4881,7 @@ innobase_release_savepoint(
DBUG_ASSERT(hton == innodb_hton_ptr);
trx = check_trx_exists(thd);
+ trx_start_if_not_started(trx);
/* TODO: use provided savepoint data area to store savepoint data */
@@ -4815,7 +4937,7 @@ innobase_savepoint(
error = trx_savepoint_for_mysql(trx, name, (ib_int64_t)0);
if (error == DB_SUCCESS && trx->fts_trx != NULL) {
- fts_savepoint_take(trx, name);
+ fts_savepoint_take(trx, trx->fts_trx, name);
}
DBUG_RETURN(convert_error_code_to_mysql(error, 0, NULL));
@@ -4850,7 +4972,7 @@ innobase_close_connection(
sql_print_warning(
"MySQL is closing a connection that has an active "
- "InnoDB transaction. "TRX_ID_FMT" row modifications "
+ "InnoDB transaction. " TRX_ID_FMT " row modifications "
"will roll back.",
trx->undo_no);
}
@@ -4943,16 +5065,24 @@ innobase_kill_connection(
#endif /* WITH_WSREP */
trx = thd_to_trx(thd);
- if (trx)
- {
- /* Cancel a pending lock request. */
- lock_mutex_enter();
- trx_mutex_enter(trx);
- if (trx->lock.wait_lock)
- lock_cancel_waiting_and_release(trx->lock.wait_lock);
- trx_mutex_exit(trx);
- lock_mutex_exit();
- }
+ if (trx) {
+ THD *cur = current_thd;
+ THD *owner = trx->current_lock_mutex_owner;
+
+ if (owner != cur) {
+ lock_mutex_enter();
+ }
+ trx_mutex_enter(trx);
+
+ /* Cancel a pending lock request. */
+ if (trx->lock.wait_lock)
+ lock_cancel_waiting_and_release(trx->lock.wait_lock);
+
+ trx_mutex_exit(trx);
+ if (owner != cur) {
+ lock_mutex_exit();
+ }
+ }
DBUG_VOID_RETURN;
}
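The innobase_kill_connection() hunk above only acquires the lock system mutex when the killing thread is not already recorded as its owner (trx->current_lock_mutex_owner), so a kill issued from code that already holds the mutex cannot self-deadlock. A minimal sketch of that conditional-locking pattern, using plain pthreads stand-ins rather than the InnoDB lock_sys API:

#include <pthread.h>

static pthread_mutex_t	lock_sys_mutex = PTHREAD_MUTEX_INITIALIZER;
					/* stands in for lock_sys->mutex */
static pthread_t	lock_mutex_owner;
					/* stands in for
					trx->current_lock_mutex_owner */
static int		lock_mutex_owner_set = 0;

void
cancel_waiting_lock(void)
{
	/* Take the mutex only if this thread does not already hold it. */
	const int already_owner
		= lock_mutex_owner_set
		&& pthread_equal(lock_mutex_owner, pthread_self());

	if (!already_owner) {
		pthread_mutex_lock(&lock_sys_mutex);
	}

	/* ... cancel the victim transaction's pending wait_lock here ... */

	if (!already_owner) {
		pthread_mutex_unlock(&lock_sys_mutex);
	}
}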
@@ -4967,14 +5097,11 @@ handler::Table_flags
ha_innobase::table_flags() const
/*============================*/
{
- THD *thd = ha_thd();
/* Need to use tx_isolation here since table flags is (also)
called before prebuilt is inited. */
- ulong const tx_isolation = thd_tx_isolation(thd);
+ ulong const tx_isolation = thd_tx_isolation(ha_thd());
- if (tx_isolation <= ISO_READ_COMMITTED &&
- !(tx_isolation == ISO_READ_COMMITTED &&
- thd_rpl_is_parallel(thd))) {
+ if (tx_isolation <= ISO_READ_COMMITTED) {
return(int_table_flags);
}
@@ -8528,7 +8655,7 @@ calc_row_difference(
if (doc_id < prebuilt->table->fts->cache->next_doc_id) {
fprintf(stderr,
"InnoDB: FTS Doc ID must be larger than"
- " "IB_ID_FMT" for table",
+ " " IB_ID_FMT " for table",
innodb_table->fts->cache->next_doc_id
- 1);
ut_print_name(stderr, trx,
@@ -8540,9 +8667,9 @@ calc_row_difference(
- prebuilt->table->fts->cache->next_doc_id)
>= FTS_DOC_ID_MAX_STEP) {
fprintf(stderr,
- "InnoDB: Doc ID "UINT64PF" is too"
+ "InnoDB: Doc ID " UINT64PF " is too"
" big. Its difference with largest"
- " Doc ID used "UINT64PF" cannot"
+ " Doc ID used " UINT64PF " cannot"
" exceed or equal to %d\n",
doc_id,
prebuilt->table->fts->cache->next_doc_id - 1,
@@ -9326,6 +9453,29 @@ ha_innobase::innobase_get_index(
index = innobase_index_lookup(share, keynr);
if (index) {
+
+ if (!key || ut_strcmp(index->name, key->name) != 0) {
+ fprintf(stderr, "InnoDB: [Error] Index for key no %u"
+ " mysql name %s , InnoDB name %s for table %s\n",
+ keynr, key ? key->name : "NULL",
+ index->name,
+ prebuilt->table->name);
+
+ for(ulint i=0; i < table->s->keys; i++) {
+ index = innobase_index_lookup(share, i);
+ key = table->key_info + i;
+
+ if (index) {
+
+ fprintf(stderr, "InnoDB: [Note] Index for key no %u"
+ " mysql name %s , InnoDB name %s for table %s\n",
+ (unsigned) i, key ? key->name : "NULL",
+ index->name,
+ prebuilt->table->name);
+ }
+ }
+ }
+
ut_a(ut_strcmp(index->name, key->name) == 0);
} else {
/* Can't find index with keynr in the translation
@@ -12959,16 +13109,6 @@ ha_innobase::get_memory_buffer_size() const
return(innobase_buffer_pool_size);
}
-UNIV_INTERN
-bool
-ha_innobase::is_corrupt() const
-{
- if (share->ib_table)
- return ((bool)share->ib_table->is_corrupt);
- else
- return (FALSE);
-}
-
/*********************************************************************//**
Calculates the key number used inside MySQL for an Innobase index. We will
first check the "index translation table" for a match of the index to get
@@ -13446,6 +13586,35 @@ ha_innobase::info_low(
break;
}
+ DBUG_EXECUTE_IF("ib_ha_innodb_stat_not_initialized",
+ index->table->stat_initialized = FALSE;);
+
+ if (!ib_table->stat_initialized ||
+ (index->table != ib_table ||
+ !index->table->stat_initialized)) {
+ fprintf(stderr,
+ "InnoDB: Warning: Index %s points to table %s"
+ " and ib_table %s statistics is initialized %d "
+ " but index table %s initialized %d "
+ " mysql table is %s. Have you mixed "
+ "up .frm files from different "
+ "installations? "
+ "See " REFMAN
+ "innodb-troubleshooting.html\n",
+ index->name,
+ index->table->name,
+ ib_table->name,
+ ib_table->stat_initialized,
+ index->table->name,
+ index->table->stat_initialized,
+ table->s->table_name.str
+ );
+
+ /* This is better than
+ asserting in the function below. */
+ dict_stats_init(index->table);
+ }
+
rec_per_key = innodb_rec_per_key(
index, j, stats.records);
@@ -14139,9 +14308,13 @@ ha_innobase::get_foreign_key_list(
mutex_enter(&(dict_sys->mutex));
- for (foreign = UT_LIST_GET_FIRST(prebuilt->table->foreign_list);
- foreign != NULL;
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) {
+ for (dict_foreign_set::iterator it
+ = prebuilt->table->foreign_set.begin();
+ it != prebuilt->table->foreign_set.end();
+ ++it) {
+
+ foreign = *it;
+
pf_key_info = get_foreign_key_info(thd, foreign);
if (pf_key_info) {
f_key_list->push_back(pf_key_info);
@@ -14177,9 +14350,13 @@ ha_innobase::get_parent_foreign_key_list(
mutex_enter(&(dict_sys->mutex));
- for (foreign = UT_LIST_GET_FIRST(prebuilt->table->referenced_list);
- foreign != NULL;
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
+ for (dict_foreign_set::iterator it
+ = prebuilt->table->referenced_set.begin();
+ it != prebuilt->table->referenced_set.end();
+ ++it) {
+
+ foreign = *it;
+
pf_key_info = get_foreign_key_info(thd, foreign);
if (pf_key_info) {
f_key_list->push_back(pf_key_info);
@@ -14212,8 +14389,8 @@ ha_innobase::can_switch_engines(void)
"determining if there are foreign key constraints";
row_mysql_freeze_data_dictionary(prebuilt->trx);
- can_switch = !UT_LIST_GET_FIRST(prebuilt->table->referenced_list)
- && !UT_LIST_GET_FIRST(prebuilt->table->foreign_list);
+ can_switch = prebuilt->table->referenced_set.empty()
+ && prebuilt->table->foreign_set.empty();
row_mysql_unfreeze_data_dictionary(prebuilt->trx);
prebuilt->trx->op_info = "";
@@ -16041,7 +16218,7 @@ innobase_xa_prepare(
|| !thd_test_options(
thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) {
- /* For ibbackup to work the order of transactions in binlog
+ /* For mysqlbackup to work the order of transactions in binlog
and InnoDB must be the same. Consider the situation
thread1> prepare; write to binlog; ...
@@ -19816,8 +19993,14 @@ static MYSQL_SYSVAR_ULONG(saved_page_number_debug,
srv_saved_page_number_debug, PLUGIN_VAR_OPCMDARG,
"An InnoDB page number.",
NULL, innodb_save_page_no, 0, 0, UINT_MAX32, 0);
+
#endif /* UNIV_DEBUG */
+static MYSQL_SYSVAR_UINT(simulate_comp_failures, srv_simulate_comp_failures,
+ PLUGIN_VAR_NOCMDARG,
+ "Simulate compression failures.",
+ NULL, NULL, 0, 0, 99, 0);
+
static MYSQL_SYSVAR_BOOL(force_primary_key,
srv_force_primary_key,
PLUGIN_VAR_OPCMDARG,
@@ -20097,6 +20280,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(fil_make_page_dirty_debug),
MYSQL_SYSVAR(saved_page_number_debug),
#endif /* UNIV_DEBUG */
+ MYSQL_SYSVAR(simulate_comp_failures),
MYSQL_SYSVAR(corrupt_table_action),
MYSQL_SYSVAR(fake_changes),
MYSQL_SYSVAR(locking_fake_changes),
@@ -20106,7 +20290,6 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(compression_algorithm),
MYSQL_SYSVAR(mtflush_threads),
MYSQL_SYSVAR(use_mtflush),
-
NULL
};
@@ -20385,7 +20568,7 @@ ib_senderrf(
va_start(args, code);
- myf l;
+ myf l=0;
switch(level) {
case IB_LOG_LEVEL_INFO:
diff --git a/storage/xtradb/handler/ha_innodb.h b/storage/xtradb/handler/ha_innodb.h
index 0c76c286030..2d70c67d3bf 100644
--- a/storage/xtradb/handler/ha_innodb.h
+++ b/storage/xtradb/handler/ha_innodb.h
@@ -151,7 +151,6 @@ class ha_innobase: public handler
double read_time(uint index, uint ranges, ha_rows rows);
longlong get_memory_buffer_size() const;
my_bool is_fake_change_enabled(THD *thd);
- bool is_corrupt() const;
int write_row(uchar * buf);
int update_row(const uchar * old_data, uchar * new_data);
diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc
index 8097fd01e3f..2d7fd259cb1 100644
--- a/storage/xtradb/handler/handler0alter.cc
+++ b/storage/xtradb/handler/handler0alter.cc
@@ -612,15 +612,9 @@ innobase_init_foreign(
/* Check if any existing foreign key has the same id,
this is needed only if user supplies the constraint name */
- for (const dict_foreign_t* existing_foreign
- = UT_LIST_GET_FIRST(table->foreign_list);
- existing_foreign != 0;
- existing_foreign = UT_LIST_GET_NEXT(
- foreign_list, existing_foreign)) {
-
- if (ut_strcmp(existing_foreign->id, foreign->id) == 0) {
- return(false);
- }
+ if (table->foreign_set.find(foreign)
+ != table->foreign_set.end()) {
+ return(false);
}
}
@@ -2258,14 +2252,18 @@ innobase_check_foreigns_low(
const char* col_name,
bool drop)
{
+ dict_foreign_t* foreign;
ut_ad(mutex_own(&dict_sys->mutex));
/* Check if any FOREIGN KEY constraints are defined on this
column. */
- for (const dict_foreign_t* foreign = UT_LIST_GET_FIRST(
- user_table->foreign_list);
- foreign;
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) {
+
+ for (dict_foreign_set::iterator it = user_table->foreign_set.begin();
+ it != user_table->foreign_set.end();
+ ++it) {
+
+ foreign = *it;
+
if (!drop && !(foreign->type
& (DICT_FOREIGN_ON_DELETE_SET_NULL
| DICT_FOREIGN_ON_UPDATE_SET_NULL))) {
@@ -2297,10 +2295,13 @@ innobase_check_foreigns_low(
/* Check if any FOREIGN KEY constraints in other tables are
referring to the column that is being dropped. */
- for (const dict_foreign_t* foreign = UT_LIST_GET_FIRST(
- user_table->referenced_list);
- foreign;
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
+ for (dict_foreign_set::iterator it
+ = user_table->referenced_set.begin();
+ it != user_table->referenced_set.end();
+ ++it) {
+
+ foreign = *it;
+
if (innobase_dropping_foreign(foreign, drop_fk, n_drop_fk)) {
continue;
}
@@ -3188,6 +3189,9 @@ error_handling:
case DB_DUPLICATE_KEY:
my_error(ER_DUP_KEY, MYF(0), "SYS_INDEXES");
break;
+ case DB_OUT_OF_FILE_SPACE:
+ my_error_innodb(error, table_name, user_table->flags);
+ break;
default:
my_error_innodb(error, table_name, user_table->flags);
}
@@ -3648,11 +3652,12 @@ check_if_ok_to_rename:
continue;
}
- for (dict_foreign_t* foreign = UT_LIST_GET_FIRST(
- prebuilt->table->foreign_list);
- foreign != NULL;
- foreign = UT_LIST_GET_NEXT(
- foreign_list, foreign)) {
+ for (dict_foreign_set::iterator it
+ = prebuilt->table->foreign_set.begin();
+ it != prebuilt->table->foreign_set.end();
+ ++it) {
+
+ dict_foreign_t* foreign = *it;
const char* fid = strchr(foreign->id, '/');
DBUG_ASSERT(fid);
@@ -4498,10 +4503,12 @@ err_exit:
rename_foreign:
trx->op_info = "renaming column in SYS_FOREIGN_COLS";
- for (const dict_foreign_t* foreign = UT_LIST_GET_FIRST(
- user_table->foreign_list);
- foreign != NULL;
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) {
+ for (dict_foreign_set::iterator it = user_table->foreign_set.begin();
+ it != user_table->foreign_set.end();
+ ++it) {
+
+ dict_foreign_t* foreign = *it;
+
for (unsigned i = 0; i < foreign->n_fields; i++) {
if (strcmp(foreign->foreign_col_names[i], from)) {
continue;
@@ -4531,10 +4538,12 @@ rename_foreign:
}
}
- for (const dict_foreign_t* foreign = UT_LIST_GET_FIRST(
- user_table->referenced_list);
- foreign != NULL;
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
+ for (dict_foreign_set::iterator it
+ = user_table->referenced_set.begin();
+ it != user_table->referenced_set.end();
+ ++it) {
+
+ dict_foreign_t* foreign = *it;
for (unsigned i = 0; i < foreign->n_fields; i++) {
if (strcmp(foreign->referenced_col_names[i], from)) {
continue;
@@ -4858,8 +4867,8 @@ innobase_update_foreign_cache(
column names. No need to pass col_names or to drop
constraints from the data dictionary cache. */
DBUG_ASSERT(!ctx->col_names);
- DBUG_ASSERT(UT_LIST_GET_LEN(user_table->foreign_list) == 0);
- DBUG_ASSERT(UT_LIST_GET_LEN(user_table->referenced_list) == 0);
+ DBUG_ASSERT(user_table->foreign_set.empty());
+ DBUG_ASSERT(user_table->referenced_set.empty());
user_table = ctx->new_table;
} else {
/* Drop the foreign key constraints if the
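The conversions above from UT_LIST scans to dict_foreign_set operations work because the set is ordered by constraint id (dict_foreign_compare): two dict_foreign_t pointers are equivalent for the set exactly when their ids compare equal, so foreign_set.find(foreign) is the duplicate-id check that innobase_init_foreign() previously did with a loop. A self-contained sketch of that equivalence, with the struct reduced to its id field:

#include <string.h>
#include <set>

struct dict_foreign_t {
	const char*	id;	/* constraint name, e.g. "db/t1_ibfk_1" */
};

struct dict_foreign_compare {
	bool operator()(
		const dict_foreign_t*	lhs,
		const dict_foreign_t*	rhs) const
	{
		return(strcmp(lhs->id, rhs->id) < 0);
	}
};

typedef std::set<dict_foreign_t*, dict_foreign_compare> dict_foreign_set;

int main()
{
	dict_foreign_t	a = { "db/t1_ibfk_1" };
	dict_foreign_t	b = { "db/t1_ibfk_1" };	/* same id, different object */

	dict_foreign_set	s;
	s.insert(&a);

	/* find() locates &a even though the probe is &b, because the set
	compares ids, not pointers: this is the duplicate-name check. */
	bool	duplicate = (s.find(&b) != s.end());

	return(duplicate ? 0 : 1);
}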
diff --git a/storage/xtradb/handler/i_s.cc b/storage/xtradb/handler/i_s.cc
index b53c3ad536e..a0ba8c5f1a1 100644
--- a/storage/xtradb/handler/i_s.cc
+++ b/storage/xtradb/handler/i_s.cc
@@ -8287,6 +8287,15 @@ i_s_innodb_changed_pages_fill(
limit_lsn_range_from_condition(table, cond, &min_lsn,
&max_lsn);
}
+
+ /* If the log tracker is running and our max_lsn > current tracked LSN,
+ cap the max lsn so that we don't try to read any partial runs as the
+ tracked LSN advances. */
+ if (srv_track_changed_pages) {
+ ib_uint64_t tracked_lsn = log_get_tracked_lsn();
+ if (max_lsn > tracked_lsn)
+ max_lsn = tracked_lsn;
+ }
if (!log_online_bitmap_iterator_init(&i, min_lsn, max_lsn)) {
my_error(ER_CANT_FIND_SYSTEM_REC, MYF(0));
diff --git a/storage/xtradb/include/btr0cur.h b/storage/xtradb/include/btr0cur.h
index 8a35cb1a3da..4ed66e76fe0 100644
--- a/storage/xtradb/include/btr0cur.h
+++ b/storage/xtradb/include/btr0cur.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -582,6 +582,17 @@ void
btr_estimate_number_of_different_key_vals(
/*======================================*/
dict_index_t* index); /*!< in: index */
+
+/** Gets the externally stored size of a record, in units of a database page.
+@param[in] rec record
+@param[in] offsets array returned by rec_get_offsets()
+@return externally stored part, in units of a database page */
+
+ulint
+btr_rec_get_externally_stored_len(
+ const rec_t* rec,
+ const ulint* offsets);
+
/*******************************************************************//**
Marks non-updated off-page fields as disowned by this record. The ownership
must be transferred to the updated record which is inserted elsewhere in the
diff --git a/storage/xtradb/include/buf0buf.h b/storage/xtradb/include/buf0buf.h
index d5cc03d8c86..84db9cfcf2b 100644
--- a/storage/xtradb/include/buf0buf.h
+++ b/storage/xtradb/include/buf0buf.h
@@ -273,6 +273,15 @@ buf_pool_get_oldest_modification(void);
/*==================================*/
/********************************************************************//**
+Gets the smallest oldest_modification lsn for any page in the pool. Returns
+zero if all modified pages have been flushed to disk.
+@return oldest modification in pool, zero if none */
+UNIV_INTERN
+lsn_t
+buf_pool_get_oldest_modification_peek(void);
+/*=======================================*/
+
+/********************************************************************//**
Allocates a buf_page_t descriptor. This function must succeed. In case
of failure we assert in this function. */
UNIV_INLINE
@@ -437,7 +446,7 @@ buf_page_create(
mtr_t* mtr); /*!< in: mini-transaction handle */
#else /* !UNIV_HOTBACKUP */
/********************************************************************//**
-Inits a page to the buffer buf_pool, for use in ibbackup --restore. */
+Inits a page to the buffer buf_pool, for use in mysqlbackup --restore. */
UNIV_INTERN
void
buf_page_init_for_backup_restore(
diff --git a/storage/xtradb/include/buf0buf.ic b/storage/xtradb/include/buf0buf.ic
index c49061621f3..10f0e02cb8f 100644
--- a/storage/xtradb/include/buf0buf.ic
+++ b/storage/xtradb/include/buf0buf.ic
@@ -662,6 +662,11 @@ buf_page_get_block(
buf_page_t* bpage) /*!< in: control block, or NULL */
{
if (bpage != NULL) {
+#ifdef UNIV_DEBUG
+ buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
+ ut_ad(buf_page_hash_lock_held_s_or_x(buf_pool, bpage)
+ || mutex_own(&buf_pool->LRU_list_mutex));
+#endif
ut_ad(buf_page_in_file(bpage));
if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
@@ -1176,12 +1181,6 @@ buf_page_hash_get_low(
ut_a(buf_page_in_file(bpage));
ut_ad(bpage->in_page_hash);
ut_ad(!bpage->in_zip_hash);
-#if UNIV_WORD_SIZE == 4
- /* On 32-bit systems, there is no padding in
- buf_page_t. On other systems, Valgrind could complain
- about uninitialized pad bytes. */
- UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);
-#endif
}
return(bpage);
diff --git a/storage/xtradb/include/dict0crea.h b/storage/xtradb/include/dict0crea.h
index 6ec1079957b..67eab9058da 100644
--- a/storage/xtradb/include/dict0crea.h
+++ b/storage/xtradb/include/dict0crea.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -124,28 +124,24 @@ dict_create_add_foreign_id(
const char* name, /*!< in: table name */
dict_foreign_t* foreign)/*!< in/out: foreign key */
__attribute__((nonnull));
-/********************************************************************//**
-Adds foreign key definitions to data dictionary tables in the database. We
-look at table->foreign_list, and also generate names to constraints that were
-not named by the user. A generated constraint has a name of the format
-databasename/tablename_ibfk_NUMBER, where the numbers start from 1, and are
-given locally for this table, that is, the number is not global, as in the
-old format constraints < 4.0.18 it used to be.
-@return error code or DB_SUCCESS */
+
+/** Adds the given set of foreign key objects to the dictionary tables
+in the database. This function does not modify the dictionary cache. The
+caller must ensure that all foreign key objects contain a valid constraint
+name in foreign->id.
+@param[in] local_fk_set set of foreign key objects, to be added to
+the dictionary tables
+@param[in] table table to which the foreign key objects in
+local_fk_set belong
+@param[in,out] trx transaction
+@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
dict_create_add_foreigns_to_dictionary(
/*===================================*/
- ulint start_id,/*!< in: if we are actually doing ALTER TABLE
- ADD CONSTRAINT, we want to generate constraint
- numbers which are bigger than in the table so
- far; we number the constraints from
- start_id + 1 up; start_id should be set to 0 if
- we are creating a new table, or if the table
- so far has no constraints for which the name
- was generated here */
- dict_table_t* table, /*!< in: table */
- trx_t* trx) /*!< in: transaction */
+ const dict_foreign_set& local_fk_set,
+ const dict_table_t* table,
+ trx_t* trx)
__attribute__((nonnull, warn_unused_result));
/****************************************************************//**
Creates the tablespaces and datafiles system tables inside InnoDB
diff --git a/storage/xtradb/include/dict0dict.h b/storage/xtradb/include/dict0dict.h
index 52ac5eee86b..78503d954ba 100644
--- a/storage/xtradb/include/dict0dict.h
+++ b/storage/xtradb/include/dict0dict.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2013, SkySQL Ab. All Rights Reserved.
@@ -46,6 +46,9 @@ Created 1/8/1996 Heikki Tuuri
#include "fsp0fsp.h"
#include "dict0pagecompress.h"
+extern bool innodb_table_stats_not_found;
+extern bool innodb_index_stats_not_found;
+
#ifndef UNIV_HOTBACKUP
# include "sync0sync.h"
# include "sync0rw.h"
@@ -1447,6 +1450,28 @@ UNIV_INTERN
void
dict_mutex_exit_for_mysql(void);
/*===========================*/
+
+/** Create a dict_table_t's stats latch or delay for lazy creation.
+This function is only called from either single threaded environment
+or from a thread that has not shared the table object with other threads.
+@param[in,out] table table whose stats latch to create
+@param[in] enabled if false then the latch is disabled
+and dict_table_stats_lock()/unlock() become no-ops on this table. */
+
+void
+dict_table_stats_latch_create(
+ dict_table_t* table,
+ bool enabled);
+
+/** Destroy a dict_table_t's stats latch.
+This function is only called from either single threaded environment
+or from a thread that has not shared the table object with other threads.
+@param[in,out] table table whose stats latch to destroy */
+
+void
+dict_table_stats_latch_destroy(
+ dict_table_t* table);
+
/**********************************************************************//**
Lock the appropriate latch to protect a given table's statistics.
table->id is used to pick the corresponding latch from a global array of
diff --git a/storage/xtradb/include/dict0mem.h b/storage/xtradb/include/dict0mem.h
index 8de9206cb81..5bea2334131 100644
--- a/storage/xtradb/include/dict0mem.h
+++ b/storage/xtradb/include/dict0mem.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2013, SkySQL Ab. All Rights Reserved.
@@ -50,6 +50,9 @@ Created 1/8/1996 Heikki Tuuri
#include "hash0hash.h"
#include "trx0types.h"
#include "fts0fts.h"
+#include "os0once.h"
+#include <set>
+#include <algorithm>
/* Forward declaration. */
struct ib_rbt_t;
@@ -695,6 +698,9 @@ struct dict_index_t{
ulint stat_n_leaf_pages;
/*!< approximate number of leaf pages in the
index tree */
+ bool stats_error_printed;
+ /*!< has a persistent statistics error
+ been printed for this index? */
/* @} */
/** Statistics for defragmentation, these numbers are estimations and
could be very inaccurate at certain times, e.g. right after restart,
@@ -790,12 +796,106 @@ struct dict_foreign_t{
does not generate new indexes
implicitly */
dict_index_t* referenced_index;/*!< referenced index */
- UT_LIST_NODE_T(dict_foreign_t)
- foreign_list; /*!< list node for foreign keys of the
- table */
- UT_LIST_NODE_T(dict_foreign_t)
- referenced_list;/*!< list node for referenced
- keys of the table */
+};
+
+/** Compare two dict_foreign_t objects using their ids. Used in the ordering
+of dict_table_t::foreign_set and dict_table_t::referenced_set. It returns
+true if the first argument is considered to go before the second in the
+strict weak ordering it defines, and false otherwise. */
+struct dict_foreign_compare {
+
+ bool operator()(
+ const dict_foreign_t* lhs,
+ const dict_foreign_t* rhs) const
+ {
+ return(ut_strcmp(lhs->id, rhs->id) < 0);
+ }
+};
+
+/** A function object to find a foreign key with the given index as the
+referenced index. Return the foreign key with matching criteria or NULL */
+struct dict_foreign_with_index {
+
+ dict_foreign_with_index(const dict_index_t* index)
+ : m_index(index)
+ {}
+
+ bool operator()(const dict_foreign_t* foreign) const
+ {
+ return(foreign->referenced_index == m_index);
+ }
+
+ const dict_index_t* m_index;
+};
+
+/* A function object to check if the foreign constraint is between different
+tables. Returns true if foreign key constraint is between different tables,
+false otherwise. */
+struct dict_foreign_different_tables {
+
+ bool operator()(const dict_foreign_t* foreign) const
+ {
+ return(foreign->foreign_table != foreign->referenced_table);
+ }
+};
+
+/** A function object to check if the foreign key constraint has the same
+name as given. If the full name of the foreign key constraint doesn't match,
+then, check if removing the database name from the foreign key constraint
+matches. Return true if it matches, false otherwise. */
+struct dict_foreign_matches_id {
+
+ dict_foreign_matches_id(const char* id)
+ : m_id(id)
+ {}
+
+ bool operator()(const dict_foreign_t* foreign) const
+ {
+ if (0 == innobase_strcasecmp(foreign->id, m_id)) {
+ return(true);
+ }
+ if (const char* pos = strchr(foreign->id, '/')) {
+ if (0 == innobase_strcasecmp(m_id, pos + 1)) {
+ return(true);
+ }
+ }
+ return(false);
+ }
+
+ const char* m_id;
+};
+
+typedef std::set<dict_foreign_t*, dict_foreign_compare> dict_foreign_set;
+
+/*********************************************************************//**
+Frees a foreign key struct. */
+inline
+void
+dict_foreign_free(
+/*==============*/
+ dict_foreign_t* foreign) /*!< in, own: foreign key struct */
+{
+ mem_heap_free(foreign->heap);
+}
+
+/** The destructor will free all the foreign key constraints in the set
+by calling dict_foreign_free() on each of the foreign key constraints.
+This is used to free the allocated memory when a local set goes out
+of scope. */
+struct dict_foreign_set_free {
+
+ dict_foreign_set_free(const dict_foreign_set& foreign_set)
+ : m_foreign_set(foreign_set)
+ {}
+
+ ~dict_foreign_set_free()
+ {
+ std::for_each(m_foreign_set.begin(),
+ m_foreign_set.end(),
+ dict_foreign_free);
+ }
+
+ const dict_foreign_set& m_foreign_set;
};
/** The flags for ON_UPDATE and ON_DELETE can be ORed; the default is that
@@ -817,6 +917,8 @@ the table, DML from memcached will be blocked. */
/** Data structure for a database table. Most fields will be
initialized to 0, NULL or FALSE in dict_mem_table_create(). */
struct dict_table_t{
+
+
table_id_t id; /*!< id of the table */
mem_heap_t* heap; /*!< memory heap */
char* name; /*!< table name */
@@ -871,13 +973,16 @@ struct dict_table_t{
hash_node_t id_hash; /*!< hash chain node */
UT_LIST_BASE_NODE_T(dict_index_t)
indexes; /*!< list of indexes of the table */
- UT_LIST_BASE_NODE_T(dict_foreign_t)
- foreign_list;/*!< list of foreign key constraints
+
+ dict_foreign_set foreign_set;
+ /*!< set of foreign key constraints
in the table; these refer to columns
in other tables */
- UT_LIST_BASE_NODE_T(dict_foreign_t)
- referenced_list;/*!< list of foreign key constraints
+
+ dict_foreign_set referenced_set;
+ /*!< set of foreign key constraints
which refer to this table */
+
UT_LIST_NODE_T(dict_table_t)
table_LRU; /*!< node of the LRU list of tables */
unsigned fk_max_recusive_level:8;
@@ -927,6 +1032,10 @@ struct dict_table_t{
initialized in dict_table_add_to_cache() */
/** Statistics for query optimization */
/* @{ */
+
+ volatile os_once::state_t stats_latch_created;
+ /*!< Creation state of 'stats_latch'. */
+
rw_lock_t* stats_latch; /*!< this latch protects:
dict_table_t::stat_initialized
dict_table_t::stat_n_rows (*)
@@ -1036,6 +1145,9 @@ struct dict_table_t{
/*!< see BG_STAT_* above.
Writes are covered by dict_sys->mutex.
Dirty reads are possible. */
+ bool stats_error_printed;
+ /*!< Has a persistent stats error already
+ been printed for this table? */
/* @} */
/*----------------------*/
/**!< The following fields are used by the
@@ -1116,6 +1228,19 @@ struct dict_table_t{
#endif /* UNIV_DEBUG */
};
+/** A function object to add the foreign key constraint to the referenced set
+of the referenced table, if it exists in the dictionary cache. */
+struct dict_foreign_add_to_referenced_table {
+ void operator()(dict_foreign_t* foreign) const
+ {
+ if (dict_table_t* table = foreign->referenced_table) {
+ std::pair<dict_foreign_set::iterator, bool> ret
+ = table->referenced_set.insert(foreign);
+ ut_a(ret.second);
+ }
+ }
+};
+
#ifndef UNIV_NONINL
#include "dict0mem.ic"
#endif
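dict_foreign_set_free above is a small RAII guard: a local set of freshly built constraints is released wholesale when the guard leaves scope, because its destructor applies dict_foreign_free() to every element. The sketch below shows the same idea with trivial allocation (new/delete) instead of the real mem_heap handling; all names here are simplified stand-ins, not the actual dict0mem definitions.

#include <algorithm>
#include <string.h>
#include <set>

struct dict_foreign_t {
	const char*	id;
};

static dict_foreign_t*
dict_foreign_create(const char* id)
{
	dict_foreign_t*	f = new dict_foreign_t();
	f->id = id;
	return(f);
}

static void
dict_foreign_free(dict_foreign_t* f)
{
	delete f;	/* the real code frees foreign->heap instead */
}

struct dict_foreign_compare {
	bool operator()(
		const dict_foreign_t*	lhs,
		const dict_foreign_t*	rhs) const
	{
		return(strcmp(lhs->id, rhs->id) < 0);
	}
};

typedef std::set<dict_foreign_t*, dict_foreign_compare> dict_foreign_set;

/* Frees every constraint in the set when the guard goes out of scope. */
struct dict_foreign_set_free {
	dict_foreign_set_free(const dict_foreign_set& s) : m_set(s) {}
	~dict_foreign_set_free()
	{
		std::for_each(m_set.begin(), m_set.end(), dict_foreign_free);
	}
	const dict_foreign_set&	m_set;
};

int main()
{
	dict_foreign_set	local_fk_set;
	dict_foreign_set_free	guard(local_fk_set);

	local_fk_set.insert(dict_foreign_create("db/t1_ibfk_1"));
	local_fk_set.insert(dict_foreign_create("db/t1_ibfk_2"));

	/* Any early error return here would still free both constraints
	through the guard's destructor. */
	return(0);
}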
diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h
index 3960eef5d7e..c2d113bdc1f 100644
--- a/storage/xtradb/include/fil0fil.h
+++ b/storage/xtradb/include/fil0fil.h
@@ -50,7 +50,7 @@ struct fil_space_t;
typedef std::list<const char*> space_name_list_t;
/** When mysqld is run, the default directory "." is the mysqld datadir,
-but in the MySQL Embedded Server Library and ibbackup it is not the default
+but in the MySQL Embedded Server Library and mysqlbackup it is not the default
directory, and we must set the base file path explicitly */
extern const char* fil_path_to_mysql_datadir;
@@ -456,8 +456,8 @@ exists and the space id in it matches. Replays the create operation if a file
at that path does not exist yet. If the database directory for the file to be
created does not exist, then we create the directory, too.
-Note that ibbackup --apply-log sets fil_path_to_mysql_datadir to point to the
-datadir that we should use in replaying the file operations.
+Note that mysqlbackup --apply-log sets fil_path_to_mysql_datadir to point to
+the datadir that we should use in replaying the file operations.
@return end of log record, or NULL if the record was not completely
contained between ptr and end_ptr */
UNIV_INTERN
@@ -710,9 +710,9 @@ fil_space_for_table_exists_in_mem(
#else /* !UNIV_HOTBACKUP */
/********************************************************************//**
Extends all tablespaces to the size stored in the space header. During the
-ibbackup --apply-log phase we extended the spaces on-demand so that log records
-could be appllied, but that may have left spaces still too small compared to
-the size stored in the space header. */
+mysqlbackup --apply-log phase we extended the spaces on-demand so that log
+records could be applied, but that may have left spaces still too small
+compared to the size stored in the space header. */
UNIV_INTERN
void
fil_extend_tablespaces_to_stored_len(void);
diff --git a/storage/xtradb/include/fts0ast.h b/storage/xtradb/include/fts0ast.h
index c0aac6d8e4c..50ee587e282 100644
--- a/storage/xtradb/include/fts0ast.h
+++ b/storage/xtradb/include/fts0ast.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -76,6 +76,7 @@ enum fts_ast_oper_t {
struct fts_lexer_t;
struct fts_ast_node_t;
struct fts_ast_state_t;
+struct fts_ast_string_t;
typedef dberr_t (*fts_ast_callback)(fts_ast_oper_t, fts_ast_node_t*, void*);
@@ -101,16 +102,16 @@ extern
fts_ast_node_t*
fts_ast_create_node_term(
/*=====================*/
- void* arg, /*!< in: ast state */
- const char* ptr); /*!< in: term string */
+ void* arg, /*!< in: ast state */
+ const fts_ast_string_t* ptr); /*!< in: term string */
/********************************************************************
Create an AST text node */
extern
fts_ast_node_t*
fts_ast_create_node_text(
/*=====================*/
- void* arg, /*!< in: ast state */
- const char* ptr); /*!< in: text string */
+ void* arg, /*!< in: ast state */
+ const fts_ast_string_t* ptr); /*!< in: text string */
/********************************************************************
Create an AST expr list node */
extern
@@ -233,16 +234,66 @@ fts_lexer_free(
free */
__attribute__((nonnull));
+/**
+Create an ast string object, with NUL-terminator, so the string
+has one more byte than len
+@param[in] str pointer to string
+@param[in] len length of the string
+@return ast string with NUL-terminator */
+UNIV_INTERN
+fts_ast_string_t*
+fts_ast_string_create(
+ const byte* str,
+ ulint len);
+
+/**
+Free an ast string instance
+@param[in,out] ast_str string to free */
+UNIV_INTERN
+void
+fts_ast_string_free(
+ fts_ast_string_t* ast_str);
+
+/**
+Translate ast string of type FTS_AST_NUMB to unsigned long by strtoul
+@param[in] str string to translate
+@param[in] base the base
+@return translated number */
+UNIV_INTERN
+ulint
+fts_ast_string_to_ul(
+ const fts_ast_string_t* ast_str,
+ int base);
+
+/**
+Print the ast string
+@param[in] str string to print */
+UNIV_INTERN
+void
+fts_ast_string_print(
+ const fts_ast_string_t* ast_str);
+
+/* String of length len.
+We always store the string of length len with a terminating '\0',
+regardless of whether there is any 0x00 in the string itself */
+struct fts_ast_string_t {
+ /*!< Pointer to string. */
+ byte* str;
+
+ /*!< Length of the string. */
+ ulint len;
+};
+
/* Query term type */
struct fts_ast_term_t {
- byte* ptr; /*!< Pointer to term string.*/
- ibool wildcard; /*!< TRUE if wild card set.*/
+ fts_ast_string_t* ptr; /*!< Pointer to term string.*/
+ ibool wildcard; /*!< TRUE if wild card set.*/
};
/* Query text type */
struct fts_ast_text_t {
- byte* ptr; /*!< Pointer to term string.*/
- ulint distance; /*!< > 0 if proximity distance
+ fts_ast_string_t* ptr; /*!< Pointer to text string.*/
+ ulint distance; /*!< > 0 if proximity distance
set */
};
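The declarations above replace NUL-terminated char* tokens with an explicit (pointer, length) pair that is still NUL-terminated for convenience, so tokens containing embedded 0x00 bytes survive the FTS lexer and parser. What follows is a self-contained sketch of that contract only; the real implementation lives in fts0ast.cc (not part of this diff) and uses the InnoDB allocator and the typedefs from univ.i rather than the stand-ins below.

#include <cstdlib>
#include <cstring>

typedef unsigned char	byte;	/* stand-in for the univ.i typedef */
typedef unsigned long	ulint;	/* stand-in for the univ.i typedef */

struct fts_ast_string_t {
	byte*	str;	/* bytes, always followed by one extra '\0' */
	ulint	len;	/* length, not counting the terminator */
};

fts_ast_string_t*
fts_ast_string_create(const byte* str, ulint len)
{
	fts_ast_string_t*	ast_str = static_cast<fts_ast_string_t*>(
		malloc(sizeof(*ast_str)));

	ast_str->str = static_cast<byte*>(malloc(len + 1));
	memcpy(ast_str->str, str, len);
	ast_str->str[len] = '\0';	/* terminator is the extra byte */
	ast_str->len = len;

	return(ast_str);
}

void
fts_ast_string_free(fts_ast_string_t* ast_str)
{
	free(ast_str->str);
	free(ast_str);
}

ulint
fts_ast_string_to_ul(const fts_ast_string_t* ast_str, int base)
{
	return(strtoul(reinterpret_cast<const char*>(ast_str->str),
		       NULL, base));
}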
diff --git a/storage/xtradb/include/fts0fts.h b/storage/xtradb/include/fts0fts.h
index 5bea5bc0e97..a2996ecacc8 100644
--- a/storage/xtradb/include/fts0fts.h
+++ b/storage/xtradb/include/fts0fts.h
@@ -745,6 +745,7 @@ void
fts_savepoint_take(
/*===============*/
trx_t* trx, /*!< in: transaction */
+ fts_trx_t* fts_trx, /*!< in: fts transaction */
const char* name) /*!< in: savepoint name */
__attribute__((nonnull));
/**********************************************************************//**
diff --git a/storage/xtradb/include/fts0pars.h b/storage/xtradb/include/fts0pars.h
index 50f636944e5..8108e811599 100644
--- a/storage/xtradb/include/fts0pars.h
+++ b/storage/xtradb/include/fts0pars.h
@@ -53,9 +53,9 @@ typedef union YYSTYPE
/* Line 2068 of yacc.c */
#line 61 "fts0pars.y"
- int oper;
- char* token;
- fts_ast_node_t* node;
+ int oper;
+ fts_ast_string_t* token;
+ fts_ast_node_t* node;
diff --git a/storage/xtradb/include/lock0lock.h b/storage/xtradb/include/lock0lock.h
index 8d5515b5eb5..235b2373c25 100644
--- a/storage/xtradb/include/lock0lock.h
+++ b/storage/xtradb/include/lock0lock.h
@@ -301,7 +301,7 @@ lock_rec_insert_check_and_lock(
inserted record maybe should inherit
LOCK_GAP type locks from the successor
record */
- __attribute__((nonnull, warn_unused_result));
+ __attribute__((nonnull(2,3,4,6,7), warn_unused_result));
/*********************************************************************//**
Checks if locks of other transactions prevent an immediate modify (update,
delete mark, or delete unmark) of a clustered index record. If they do,
diff --git a/storage/xtradb/include/log0log.h b/storage/xtradb/include/log0log.h
index b9e0c2ef516..f130c8de423 100644
--- a/storage/xtradb/include/log0log.h
+++ b/storage/xtradb/include/log0log.h
@@ -168,6 +168,13 @@ lsn_t
log_get_lsn(void);
/*=============*/
/************************************************************//**
+Gets the current lsn without waiting on the log system mutex.
+@return current lsn, or 0 if the mutex could not be acquired */
+UNIV_INLINE
+lsn_t
+log_get_lsn_nowait(void);
+/*=============*/
+/************************************************************//**
Gets the last lsn that is fully flushed to disk.
@return last flushed lsn */
UNIV_INLINE
@@ -615,6 +622,27 @@ void
log_mem_free(void);
/*==============*/
+/****************************************************************//**
+Safely reads the log_sys->tracked_lsn value. Uses atomic operations
+if available, otherwise this field is protected with the log system
+mutex. The writer counterpart function is log_set_tracked_lsn() in
+log0online.c.
+
+@return log_sys->tracked_lsn value. */
+UNIV_INLINE
+lsn_t
+log_get_tracked_lsn(void);
+/*=====================*/
+/****************************************************************//**
+Unsafely reads the log_sys->tracked_lsn value. Uses atomic operations
+if available, otherwise does a dirty read. Use for printing only.
+
+@return log_sys->tracked_lsn value. */
+UNIV_INLINE
+lsn_t
+log_get_tracked_lsn_peek(void);
+/*==========================*/
+
extern log_t* log_sys;
/* Values used as flags */
@@ -696,13 +724,13 @@ extern log_t* log_sys;
megabyte.
This information might have been used
- since ibbackup version 0.35 but
+ since mysqlbackup version 0.35 but
before 1.41 to decide if unused ends of
non-auto-extending data files
in space 0 can be truncated.
This information was made obsolete
- by ibbackup --compress. */
+ by mysqlbackup --compress. */
#define LOG_CHECKPOINT_FSP_MAGIC_N (12 + LOG_CHECKPOINT_ARRAY_END)
/*!< Not used (0);
This magic number tells if the
@@ -731,7 +759,7 @@ extern log_t* log_sys;
/* a 32-byte field which contains
the string 'ibbackup' and the
creation time if the log file was
- created by ibbackup --restore;
+ created by mysqlbackup --restore;
when mysqld is first time started
on the restored database, it can
print helpful info for the user */
diff --git a/storage/xtradb/include/log0log.ic b/storage/xtradb/include/log0log.ic
index 7724d94b51a..853027daa7e 100644
--- a/storage/xtradb/include/log0log.ic
+++ b/storage/xtradb/include/log0log.ic
@@ -486,6 +486,26 @@ log_get_flush_lsn(void)
return(lsn);
}
+/************************************************************//**
+Gets the current lsn with a trylock
+@return current lsn, or 0 if the log mutex could not be acquired */
+UNIV_INLINE
+lsn_t
+log_get_lsn_nowait(void)
+/*=============*/
+{
+ lsn_t lsn;
+
+ if (mutex_enter_nowait(&(log_sys->mutex)))
+ return 0;
+
+ lsn = log_sys->lsn;
+
+ mutex_exit(&(log_sys->mutex));
+
+ return(lsn);
+}
+
/****************************************************************
Gets the log group capacity. It is OK to read the value without
holding log_sys->mutex because it is constant.
@@ -531,3 +551,39 @@ log_free_check(void)
}
}
#endif /* !UNIV_HOTBACKUP */
+
+/****************************************************************//**
+Unsafely reads the log_sys->tracked_lsn value. Uses atomic operations
+if available, otherwise does a dirty read. Use for printing only.
+
+@return log_sys->tracked_lsn value. */
+UNIV_INLINE
+lsn_t
+log_get_tracked_lsn_peek(void)
+/*==========================*/
+{
+#ifdef HAVE_ATOMIC_BUILTINS_64
+ return os_atomic_increment_uint64(&log_sys->tracked_lsn, 0);
+#else
+ return log_sys->tracked_lsn;
+#endif
+}
+
+/****************************************************************//**
+Safely reads the log_sys->tracked_lsn value. Uses atomic operations
+if available, otherwise this field is protected with the log system
+mutex. The writer counterpart function is log_set_tracked_lsn() in
+log0online.c.
+@return log_sys->tracked_lsn value. */
+UNIV_INLINE
+lsn_t
+log_get_tracked_lsn(void)
+/*=====================*/
+{
+#ifdef HAVE_ATOMIC_BUILTINS_64
+ return os_atomic_increment_uint64(&log_sys->tracked_lsn, 0);
+#else
+ ut_ad(mutex_own(&(log_sys->mutex)));
+ return log_sys->tracked_lsn;
+#endif
+}
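Both readers above use the same trick: os_atomic_increment_uint64(&log_sys->tracked_lsn, 0) is an atomic add-and-fetch of zero, which yields a tear-free 64-bit read even on 32-bit builds where a plain load of an lsn_t could be split in two. A minimal standalone illustration of the idiom with the underlying GCC __sync builtins; the os_atomic_* wrapper names come from os0sync.h, and the writer below is a generic CAS loop, not the actual log_set_tracked_lsn() code.

#include <stdint.h>

static volatile uint64_t	tracked_lsn;	/* stands in for
						log_sys->tracked_lsn */

/* Reader: atomic read via "add zero", as in log_get_tracked_lsn(). */
uint64_t
get_tracked_lsn(void)
{
	return(__sync_add_and_fetch(&tracked_lsn, 0));
}

/* Writer: publish a new value atomically (generic CAS loop). */
void
set_tracked_lsn(uint64_t lsn)
{
	uint64_t	old_lsn;

	do {
		old_lsn = get_tracked_lsn();
	} while (!__sync_bool_compare_and_swap(&tracked_lsn, old_lsn, lsn));
}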
diff --git a/storage/xtradb/include/os0file.h b/storage/xtradb/include/os0file.h
index 76e77799b43..ba362a0e458 100644
--- a/storage/xtradb/include/os0file.h
+++ b/storage/xtradb/include/os0file.h
@@ -132,7 +132,7 @@ enum os_file_create_t {
#define OS_FILE_READ_ONLY 333
#define OS_FILE_READ_WRITE 444
-#define OS_FILE_READ_ALLOW_DELETE 555 /* for ibbackup */
+#define OS_FILE_READ_ALLOW_DELETE 555 /* for mysqlbackup */
/* Options for file_create */
#define OS_FILE_AIO 61
@@ -168,8 +168,8 @@ enum os_file_create_t {
#define OS_FILE_LOG 256 /* This can be ORed to type */
/* @} */
-#define OS_AIO_N_PENDING_IOS_PER_THREAD 256 /*!< Windows might be able to handle
-more */
+#define OS_AIO_N_PENDING_IOS_PER_THREAD 32 /*!< Win NT does not allow more
+ than 64 */
/** Modes for aio operations @{ */
#define OS_AIO_NORMAL 21 /*!< Normal asynchronous i/o not for ibuf
diff --git a/storage/xtradb/include/os0once.h b/storage/xtradb/include/os0once.h
new file mode 100644
index 00000000000..a8bbaf1d2d4
--- /dev/null
+++ b/storage/xtradb/include/os0once.h
@@ -0,0 +1,125 @@
+/*****************************************************************************
+
+Copyright (c) 2014, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file include/os0once.h
+A class that aids executing a given function exactly once in a multi-threaded
+environment.
+
+Created Feb 20, 2014 Vasil Dimov
+*******************************************************/
+
+#ifndef os0once_h
+#define os0once_h
+
+#include "univ.i"
+
+#include "os0sync.h"
+#include "ut0ut.h"
+
+/** Execute a given function exactly once in a multi-threaded environment
+or wait for the function to be executed by another thread.
+
+Example usage:
+First the user must create a control variable of type os_once::state_t and
+assign it os_once::NEVER_DONE.
+Then the user must pass this variable, together with a function to be
+executed to os_once::do_or_wait_for_done().
+
+Multiple threads can call os_once::do_or_wait_for_done() simultaneously with
+the same (os_once::state_t) control variable. The provided function will be
+called exactly once, and when os_once::do_or_wait_for_done() returns, this
+function has completed execution, by this or another thread. In other words
+os_once::do_or_wait_for_done() will either execute the provided function or
+will wait for its execution to complete if it is already called by another
+thread or will do nothing if the function has already completed its execution
+earlier.
+
+This mimics pthread_once(3), but unfortunately pthread_once(3) does not
+support passing arguments to the init_routine() function. We should use
+std::call_once() when we start compiling with C++11 enabled. */
+class os_once {
+public:
+ /** Control variables' state type */
+ typedef ib_uint32_t state_t;
+
+ /** Not yet executed. */
+ static const state_t NEVER_DONE = 0;
+
+ /** Currently being executed by this or another thread. */
+ static const state_t IN_PROGRESS = 1;
+
+ /** Finished execution. */
+ static const state_t DONE = 2;
+
+#ifdef HAVE_ATOMIC_BUILTINS
+ /** Call a given function or wait for its execution to complete if it is
+ already called by another thread.
+ @param[in,out] state control variable
+ @param[in] do_func function to call
+ @param[in,out] do_func_arg an argument to pass to do_func(). */
+ static
+ void
+ do_or_wait_for_done(
+ volatile state_t* state,
+ void (*do_func)(void*),
+ void* do_func_arg)
+ {
+ /* Avoid calling os_compare_and_swap_uint32() in the most
+ common case. */
+ if (*state == DONE) {
+ return;
+ }
+
+ if (os_compare_and_swap_uint32(state,
+ NEVER_DONE, IN_PROGRESS)) {
+ /* We are the first. Call the function. */
+
+ do_func(do_func_arg);
+
+ const bool swapped = os_compare_and_swap_uint32(
+ state, IN_PROGRESS, DONE);
+
+ ut_a(swapped);
+ } else {
+ /* The state is not NEVER_DONE, so either it is
+ IN_PROGRESS (somebody is calling the function right
+ now) or DONE (it has already been called and completed).
+ Wait for it to become DONE. */
+ for (;;) {
+ const state_t s = *state;
+
+ switch (s) {
+ case DONE:
+ return;
+ case IN_PROGRESS:
+ break;
+ case NEVER_DONE:
+ /* fall through */
+ default:
+ ut_error;
+ }
+
+ UT_RELAX_CPU();
+ }
+ }
+ }
+#endif /* HAVE_ATOMIC_BUILTINS */
+};
+
+#endif /* os0once_h */
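Following the usage steps in the class comment above, the pattern os_once enables elsewhere in this patch is lazy one-time initialization, e.g. the new dict_table_t::stats_latch_created state that guards dict_table_stats_latch_create(). The sketch below is illustrative only: the my_* names are invented, it assumes compilation inside the InnoDB tree (so os0once.h and its includes resolve) on an atomic-builtins build, and it is not the actual dict0dict.cc code.

#include "os0once.h"

struct my_table_t {
	volatile os_once::state_t	latch_created;
					/* initialize to os_once::NEVER_DONE */
	void*				latch;	/* created on first use */
};

/* One-time init routine: executed by exactly one thread. */
static void
my_latch_create(void* arg)
{
	my_table_t*	table = static_cast<my_table_t*>(arg);

	table->latch = new int(42);	/* stands in for rw_lock_create() */
}

/* Called on every access; a cheap load once latch_created == DONE. */
static void
my_latch_get(my_table_t* table)
{
	os_once::do_or_wait_for_done(
		&table->latch_created, my_latch_create, table);

	/* table->latch is now safe to use from any thread. */
}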
diff --git a/storage/xtradb/include/os0sync.h b/storage/xtradb/include/os0sync.h
index ea5d09ec535..066fd34d668 100644
--- a/storage/xtradb/include/os0sync.h
+++ b/storage/xtradb/include/os0sync.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Portions of this file contain modifications contributed and copyrighted by
@@ -357,6 +357,10 @@ Atomic compare-and-swap and increment for InnoDB. */
# define HAVE_ATOMIC_BUILTINS
+# ifdef HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE
+# define HAVE_ATOMIC_BUILTINS_BYTE
+# endif
+
# ifdef HAVE_IB_GCC_ATOMIC_BUILTINS_64
# define HAVE_ATOMIC_BUILTINS_64
# endif
@@ -434,9 +438,13 @@ Returns the old value of *ptr, atomically sets *ptr to new_val */
# define os_atomic_test_and_set_ulint(ptr, new_val) \
__sync_lock_test_and_set(ptr, new_val)
+# define os_atomic_lock_release_byte(ptr) \
+ __sync_lock_release(ptr)
+
#elif defined(HAVE_IB_SOLARIS_ATOMICS)
# define HAVE_ATOMIC_BUILTINS
+# define HAVE_ATOMIC_BUILTINS_BYTE
# define HAVE_ATOMIC_BUILTINS_64
/* If not compiling with GCC or GCC doesn't support the atomic
@@ -515,9 +523,13 @@ Returns the old value of *ptr, atomically sets *ptr to new_val */
# define os_atomic_test_and_set_ulint(ptr, new_val) \
atomic_swap_ulong(ptr, new_val)
+# define os_atomic_lock_release_byte(ptr) \
+ (void) atomic_swap_uchar(ptr, 0)
+
#elif defined(HAVE_WINDOWS_ATOMICS)
# define HAVE_ATOMIC_BUILTINS
+# define HAVE_ATOMIC_BUILTINS_BYTE
# ifndef _WIN32
# define HAVE_ATOMIC_BUILTINS_64
@@ -574,7 +586,8 @@ Returns true if swapped, ptr is pointer to target, old_val is value to
compare to, new_val is the value to swap in. */
# define os_compare_and_swap_uint32(ptr, old_val, new_val) \
- (win_cmp_and_xchg_dword(ptr, new_val, old_val) == old_val)
+ (InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr), \
+ new_val, old_val) == old_val)
# define os_compare_and_swap_ulint(ptr, old_val, new_val) \
(win_cmp_and_xchg_ulint(ptr, new_val, old_val) == old_val)
@@ -637,6 +650,9 @@ clobbered */
# define os_atomic_test_and_set_ulong(ptr, new_val) \
InterlockedExchange(ptr, new_val)
+# define os_atomic_lock_release_byte(ptr) \
+ (void) InterlockedExchange(ptr, 0)
+
#else
# define IB_ATOMICS_STARTUP_MSG \
"Mutexes and rw_locks use InnoDB's own implementation"
@@ -684,6 +700,65 @@ for synchronization */
os_decrement_counter_by_amount(mutex, counter, 1);\
} while (0);
+/** barrier definitions for memory ordering */
+#if defined __i386__ || defined __x86_64__ || defined _M_IX86 || defined _M_X64 || defined __WIN__
+/* Performance regression was observed at some conditions for Intel
+architecture. Disable memory barrier for Intel architecture for now. */
+# define os_rmb do { } while(0)
+# define os_wmb do { } while(0)
+# define os_isync do { } while(0)
+# define IB_MEMORY_BARRIER_STARTUP_MSG \
+ "Memory barrier is not used"
+#elif defined(HAVE_IB_GCC_ATOMIC_THREAD_FENCE)
+# define HAVE_MEMORY_BARRIER
+# define os_rmb __atomic_thread_fence(__ATOMIC_ACQUIRE)
+# define os_wmb __atomic_thread_fence(__ATOMIC_RELEASE)
+#ifdef __powerpc__
+# define os_isync __asm __volatile ("isync":::"memory")
+#else
+# define os_isync do { } while(0)
+#endif
+
+# define IB_MEMORY_BARRIER_STARTUP_MSG \
+ "GCC builtin __atomic_thread_fence() is used for memory barrier"
+
+#elif defined(HAVE_IB_GCC_SYNC_SYNCHRONISE)
+# define HAVE_MEMORY_BARRIER
+# define os_rmb __sync_synchronize()
+# define os_wmb __sync_synchronize()
+# define os_isync __sync_synchronize()
+# define IB_MEMORY_BARRIER_STARTUP_MSG \
+ "GCC builtin __sync_synchronize() is used for memory barrier"
+
+#elif defined(HAVE_IB_MACHINE_BARRIER_SOLARIS)
+# define HAVE_MEMORY_BARRIER
+# include <mbarrier.h>
+# define os_rmb __machine_r_barrier()
+# define os_wmb __machine_w_barrier()
+# define os_isync os_rmb; os_wmb
+# define IB_MEMORY_BARRIER_STARTUP_MSG \
+ "Solaris memory ordering functions are used for memory barrier"
+
+#elif defined(HAVE_WINDOWS_MM_FENCE)
+# define HAVE_MEMORY_BARRIER
+# include <intrin.h>
+# define os_rmb _mm_lfence()
+# define os_wmb _mm_sfence()
+# define os_isync os_rmb; os_wmb
+# define IB_MEMORY_BARRIER_STARTUP_MSG \
+ "_mm_lfence() and _mm_sfence() are used for memory barrier"
+
+# define os_atomic_lock_release_byte(ptr) \
+ (void) InterlockedExchange(ptr, 0)
+
+#else
+# define os_rmb do { } while(0)
+# define os_wmb do { } while(0)
+# define os_isync do { } while(0)
+# define IB_MEMORY_BARRIER_STARTUP_MSG \
+ "Memory barrier is not used"
+#endif
+
#ifndef UNIV_NONINL
#include "os0sync.ic"
#endif
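(To see what the new os_rmb/os_wmb macros buy, here is a small stand-alone producer/consumer handoff written directly against the __atomic_thread_fence() builtin that the HAVE_IB_GCC_ATOMIC_THREAD_FENCE branch maps to. The variable names are made up; the pattern mirrors how the sync0rw.ic hunks pair a plain store to lock->waiters with os_wmb.)

    static int          payload;   /* data being published                 */
    static volatile int ready;     /* plain flag, like lock->waiters       */

    void producer(void)
    {
        payload = 42;                             /* write the data first         */
        __atomic_thread_fence(__ATOMIC_RELEASE);  /* os_wmb: order writes         */
        ready = 1;                                /* then publish the flag        */
    }

    int consumer(void)
    {
        while (!ready) { /* spin */ }             /* wait for the flag            */
        __atomic_thread_fence(__ATOMIC_ACQUIRE);  /* os_rmb: order reads          */
        return payload;                           /* observes the published data  */
    }

Strictly, the C11/C++11 memory model would want the flag itself to be an atomic object; the engine instead relies on volatile plus explicit fences on the compilers it supports, which is exactly what these macros encapsulate.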
diff --git a/storage/xtradb/include/read0read.h b/storage/xtradb/include/read0read.h
index e17d49b1321..0352f129c30 100644
--- a/storage/xtradb/include/read0read.h
+++ b/storage/xtradb/include/read0read.h
@@ -50,6 +50,27 @@ read_view_open_now(
NULL if a new one needs to be created */
/*********************************************************************//**
+Clones a read view object. This function will allocate space for two read
+views contiguously, one identical in size and content to @param view (starting
+at returned pointer) and another view immediately following the trx_ids array.
+The second view will have space for an extra trx_id_t element.
+@return read view struct */
+UNIV_INTERN
+read_view_t*
+read_view_clone(
+/*============*/
+ const read_view_t* view, /*!< in: view to clone */
+ read_view_t*& prebuilt_clone);/*!< in,out: prebuilt view or
+ NULL */
+/*********************************************************************//**
+Insert the view in the proper order into the trx_sys->view_list. The
+read view list is ordered by read_view_t::low_limit_no in descending order. */
+UNIV_INTERN
+void
+read_view_add(
+/*==========*/
+	read_view_t*	view);	/*!< in: view to add */
+/*********************************************************************//**
Makes a copy of the oldest existing read view, or opens a new. The view
must be closed with ..._close.
@return own: read view struct */
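(The read_view_clone() comment above describes a single allocation holding two views back to back, the second sized for one extra transaction id. A rough sketch of that layout follows, using a deliberately simplified, hypothetical view struct; the real read_view_t carries many more fields, so treat the types and names here as illustration only.)

    #include <stdlib.h>
    #include <string.h>

    typedef unsigned long long trx_id_t;

    typedef struct view_struct {            /* hypothetical, stripped-down view */
        unsigned  n_trx_ids;                /* number of ids in trx_ids[]       */
        trx_id_t  trx_ids[1];               /* really n_trx_ids entries         */
    } view_t;

    static size_t view_size(unsigned n)     /* bytes for a view with n ids */
    {
        return sizeof(view_t) + (n ? n - 1 : 0) * sizeof(trx_id_t);
    }

    /* One allocation, two views: an exact copy of 'v' first, then a second
       view immediately after it with room for one extra trx_id_t. */
    view_t* view_clone(const view_t* v, view_t** second)
    {
        size_t  first_sz  = view_size(v->n_trx_ids);
        size_t  second_sz = view_size(v->n_trx_ids + 1);
        char*   buf       = (char*) malloc(first_sz + second_sz);

        if (!buf) return NULL;
        memcpy(buf, v, first_sz);                  /* identical content     */
        *second = (view_t*) (buf + first_sz);      /* right after trx_ids[] */
        memset(*second, 0, second_sz);
        (*second)->n_trx_ids = v->n_trx_ids + 1;   /* space for one more id */
        return (view_t*) buf;
    }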
diff --git a/storage/xtradb/include/srv0srv.h b/storage/xtradb/include/srv0srv.h
index aef04d003d5..3ed3ba71698 100644
--- a/storage/xtradb/include/srv0srv.h
+++ b/storage/xtradb/include/srv0srv.h
@@ -1,9 +1,9 @@
/*****************************************************************************
-Copyright (c) 1995, 2013, Oracle and/or its affiliates. All rights reserved.
+Copyright (c) 1995, 2013, Oracle and/or its affiliates.
Copyright (c) 2008, 2009, Google Inc.
Copyright (c) 2009, Percona Inc.
-Copyright (c) 2013, SkySQL Ab. All Rights Reserved.
+Copyright (c) 2013, 2014, SkySQL Ab.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -183,9 +183,12 @@ extern char srv_disable_sort_file_cache;
thread */
extern os_event_t srv_checkpoint_completed_event;
-/* This event is set on the online redo log following thread exit to signal
-that the (slow) shutdown may proceed */
-extern os_event_t srv_redo_log_thread_finished_event;
+/* This event is set by the online redo log following thread after a successful
+log tracking iteration */
+extern os_event_t srv_redo_log_tracked_event;
+
+/** srv_redo_log_follow_thread spawn flag */
+extern bool srv_redo_log_thread_started;
/* If the last data file is auto-extended, we add this many pages to it
at a time */
@@ -642,6 +645,8 @@ extern srv_stats_t srv_stats;
When FALSE, row locks are not taken at all. */
extern my_bool srv_fake_changes_locks;
+/** Simulate compression failures. */
+extern uint srv_simulate_comp_failures;
# ifdef UNIV_PFS_THREAD
/* Keys to register InnoDB threads with performance schema */
diff --git a/storage/xtradb/include/sync0rw.h b/storage/xtradb/include/sync0rw.h
index 95bb7e16b26..0ac6b0f3f69 100644
--- a/storage/xtradb/include/sync0rw.h
+++ b/storage/xtradb/include/sync0rw.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Portions of this file contain modifications contributed and copyrighted by
@@ -109,14 +109,8 @@ extern ib_mutex_t rw_lock_list_mutex;
#ifdef UNIV_SYNC_DEBUG
/* The global mutex which protects debug info lists of all rw-locks.
To modify the debug info list of an rw-lock, this mutex has to be
-
acquired in addition to the mutex protecting the lock. */
-extern ib_mutex_t rw_lock_debug_mutex;
-extern os_event_t rw_lock_debug_event; /*!< If deadlock detection does
- not get immediately the mutex it
- may wait for this event */
-extern ibool rw_lock_debug_waiters; /*!< This is set to TRUE, if
- there may be waiters for the event */
+extern os_fast_mutex_t rw_lock_debug_mutex;
#endif /* UNIV_SYNC_DEBUG */
/** Counters for RW locks. */
@@ -142,7 +136,7 @@ extern mysql_pfs_key_t trx_i_s_cache_lock_key;
extern mysql_pfs_key_t trx_purge_latch_key;
extern mysql_pfs_key_t index_tree_rw_lock_key;
extern mysql_pfs_key_t index_online_log_key;
-extern mysql_pfs_key_t dict_table_stats_latch_key;
+extern mysql_pfs_key_t dict_table_stats_key;
extern mysql_pfs_key_t trx_sys_rw_lock_key;
extern mysql_pfs_key_t hash_table_rw_lock_key;
#endif /* UNIV_PFS_RWLOCK */
diff --git a/storage/xtradb/include/sync0rw.ic b/storage/xtradb/include/sync0rw.ic
index 3511987dbb0..8aadc406132 100644
--- a/storage/xtradb/include/sync0rw.ic
+++ b/storage/xtradb/include/sync0rw.ic
@@ -112,6 +112,7 @@ rw_lock_set_waiter_flag(
(void) os_compare_and_swap_ulint(&lock->waiters, 0, 1);
#else /* INNODB_RW_LOCKS_USE_ATOMICS */
lock->waiters = 1;
+ os_wmb;
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
}
@@ -129,6 +130,7 @@ rw_lock_reset_waiter_flag(
(void) os_compare_and_swap_ulint(&lock->waiters, 1, 0);
#else /* INNODB_RW_LOCKS_USE_ATOMICS */
lock->waiters = 0;
+ os_wmb;
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
}
@@ -256,7 +258,10 @@ rw_lock_lock_word_decr(
ulint amount) /*!< in: amount to decrement */
{
#ifdef INNODB_RW_LOCKS_USE_ATOMICS
- lint local_lock_word = lock->lock_word;
+ lint local_lock_word;
+
+ os_rmb;
+ local_lock_word = lock->lock_word;
while (local_lock_word > 0) {
if (os_compare_and_swap_lint(&lock->lock_word,
local_lock_word,
@@ -620,10 +625,6 @@ rw_lock_s_unlock_func(
/* A waiting next-writer exists, either high priority or
regular, sharing the same wait event. */
- if (lock->high_priority_wait_ex_waiter) {
-
- lock->high_priority_wait_ex_waiter = 0;
- }
os_event_set(lock->base_lock.wait_ex_event);
sync_array_object_signalled();
@@ -916,8 +917,9 @@ pfs_rw_lock_x_lock_func(
rw_lock_x_lock_func(lock, pass, file_name, line);
- if (locker != NULL)
+ if (locker != NULL) {
PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, 0);
+ }
}
else
{
@@ -1072,8 +1074,9 @@ pfs_rw_lock_s_lock_func(
rw_lock_s_lock_func(lock, pass, file_name, line);
- if (locker != NULL)
+ if (locker != NULL) {
PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
+ }
}
else
{
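(The rw_lock_lock_word_decr() hunk above is the classic optimistic-CAS shape: issue a read barrier, load the word, and keep retrying the decrement only while the word is still positive. A stand-alone sketch using the __sync builtin these macros resolve to on GCC; the function name and types here are illustrative.)

    /* Try to subtract 'amount' from *word while it is still positive.
       Returns 1 if the decrement was applied, 0 if no capacity was left. */
    static int lock_word_decr(volatile long* word, long amount)
    {
        long local = *word;        /* snapshot; os_rmb precedes this in the patch */

        while (local > 0) {
            if (__sync_bool_compare_and_swap(word, local, local - amount)) {
                return 1;          /* our snapshot was still current             */
            }
            local = *word;         /* someone else changed it: re-read and retry */
        }
        return 0;                  /* word exhausted, caller must queue or wait  */
    }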
diff --git a/storage/xtradb/include/sync0sync.h b/storage/xtradb/include/sync0sync.h
index 72cfbf61dd8..5ba385ce75f 100644
--- a/storage/xtradb/include/sync0sync.h
+++ b/storage/xtradb/include/sync0sync.h
@@ -50,6 +50,8 @@ extern "C" my_bool timed_mutexes;
#ifdef _WIN32
typedef LONG lock_word_t; /*!< On Windows, InterlockedExchange operates
on LONG variable */
+#elif defined(HAVE_ATOMIC_BUILTINS) && !defined(HAVE_ATOMIC_BUILTINS_BYTE)
+typedef ulint lock_word_t;
#else
typedef byte lock_word_t;
#endif
diff --git a/storage/xtradb/include/sync0sync.ic b/storage/xtradb/include/sync0sync.ic
index a302e1473a5..0c4a8ace887 100644
--- a/storage/xtradb/include/sync0sync.ic
+++ b/storage/xtradb/include/sync0sync.ic
@@ -83,7 +83,11 @@ ib_mutex_test_and_set(
ib_mutex_t* mutex) /*!< in: mutex */
{
#if defined(HAVE_ATOMIC_BUILTINS)
+# if defined(HAVE_ATOMIC_BUILTINS_BYTE)
return(os_atomic_test_and_set_byte(&mutex->lock_word, 1));
+# else
+ return(os_atomic_test_and_set_ulint(&mutex->lock_word, 1));
+# endif
#else
ibool ret;
@@ -95,6 +99,7 @@ ib_mutex_test_and_set(
ut_a(mutex->lock_word == 0);
mutex->lock_word = 1;
+ os_wmb;
}
return((byte) ret);
@@ -111,10 +116,7 @@ mutex_reset_lock_word(
ib_mutex_t* mutex) /*!< in: mutex */
{
#if defined(HAVE_ATOMIC_BUILTINS)
- /* In theory __sync_lock_release should be used to release the lock.
- Unfortunately, it does not work properly alone. The workaround is
- that more conservative __sync_lock_test_and_set is used instead. */
- os_atomic_test_and_set_byte(&mutex->lock_word, 0);
+ os_atomic_lock_release_byte(&mutex->lock_word);
#else
mutex->lock_word = 0;
@@ -150,6 +152,7 @@ mutex_get_waiters(
ptr = &(mutex->waiters);
+ os_rmb;
return(*ptr); /* Here we assume that the read of a single
word from memory is atomic */
}
@@ -184,6 +187,7 @@ mutex_exit_func(
to wake up possible hanging threads if
they are missed in mutex_signal_object. */
+ os_isync;
if (mutex_get_waiters(mutex) != 0) {
mutex_signal_object(mutex);
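(The mutex hunks pair __sync_lock_test_and_set on the acquire side with the new os_atomic_lock_release_byte, which expands to __sync_lock_release on GCC, on the release side. A minimal spinlock sketch outside InnoDB using exactly that builtin pair; the spin loop here omits the wait-array and backoff machinery the real mutex has.)

    typedef unsigned char byte_t;          /* stand-in for InnoDB's lock_word_t */

    static void spin_lock(volatile byte_t* lock_word)
    {
        /* __sync_lock_test_and_set returns the previous value:
           0 means the lock was free and is now ours. */
        while (__sync_lock_test_and_set(lock_word, 1) != 0) {
            /* busy-wait; real code would pause, back off, or block */
        }
    }

    static void spin_unlock(volatile byte_t* lock_word)
    {
        /* Stores 0 with release semantics; this is what
           os_atomic_lock_release_byte() now maps to. */
        __sync_lock_release(lock_word);
    }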
diff --git a/storage/xtradb/include/trx0trx.h b/storage/xtradb/include/trx0trx.h
index be13c48fdfc..75325d73f4d 100644
--- a/storage/xtradb/include/trx0trx.h
+++ b/storage/xtradb/include/trx0trx.h
@@ -275,6 +275,17 @@ read_view_t*
trx_assign_read_view(
/*=================*/
trx_t* trx); /*!< in: active transaction */
+/********************************************************************//**
+Clones the read view from another transaction. All the consistent reads within
+the receiver transaction will get the same read view as the donor transaction
+@return read view clone */
+UNIV_INTERN
+read_view_t*
+trx_clone_read_view(
+/*================*/
+ trx_t* trx, /*!< in: receiver transaction */
+ trx_t* from_trx) /*!< in: donor transaction */
+ __attribute__((nonnull, warn_unused_result));
/****************************************************************//**
Prepares a transaction for commit/rollback. */
UNIV_INTERN
@@ -1019,6 +1030,11 @@ struct trx_t{
count of tables being flushed. */
/*------------------------------*/
+ THD* current_lock_mutex_owner;
+ /*!< If this is equal to current_thd,
+ then in innobase_kill_query() we know we
+ already hold the lock_sys->mutex. */
+ /*------------------------------*/
#ifdef UNIV_DEBUG
ulint start_line; /*!< Track where it was started from */
const char* start_file; /*!< Filename where it was started */
diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i
index 2a3a85c219f..8353b1dcf8a 100644
--- a/storage/xtradb/include/univ.i
+++ b/storage/xtradb/include/univ.i
@@ -45,10 +45,10 @@ Created 1/20/1994 Heikki Tuuri
#define INNODB_VERSION_MAJOR 5
#define INNODB_VERSION_MINOR 6
-#define INNODB_VERSION_BUGFIX 17
+#define INNODB_VERSION_BUGFIX 20
#ifndef PERCONA_INNODB_VERSION
-#define PERCONA_INNODB_VERSION 65.0
+#define PERCONA_INNODB_VERSION 68.0
#endif
/* Enable UNIV_LOG_ARCHIVE in XtraDB */
@@ -481,10 +481,10 @@ typedef unsigned __int64 ib_uint64_t;
typedef unsigned __int32 ib_uint32_t;
#else
/* Use the integer types and formatting strings defined in the C99 standard. */
-# define UINT32PF "%"PRIu32
-# define INT64PF "%"PRId64
-# define UINT64PF "%"PRIu64
-# define UINT64PFx "%016"PRIx64
+# define UINT32PF "%" PRIu32
+# define INT64PF "%" PRId64
+# define UINT64PF "%" PRIu64
+# define UINT64PFx "%016" PRIx64
# define DBUG_LSN_PF UINT64PF
typedef int64_t ib_int64_t;
typedef uint64_t ib_uint64_t;
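(The univ.i hunk only inserts a space before the PRI* macros, but the space matters: C++11 user-defined literals make "%"PRIu32 with no space parse as a single string literal carrying a PRIu32 suffix, instead of two adjacent literals. A standalone illustration:)

    #include <cinttypes>
    #include <cstdio>

    int main()
    {
        uint32_t n = 42;

        /* Fine in C++11 and later: two separate string literals, "%" and the
           expansion of PRIu32, which the compiler concatenates. */
        std::printf("%" PRIu32 "\n", n);

        /* std::printf("%"PRIu32 "\n", n);
           ^ ill-formed in C++11: "%"PRIu32 is lexed as a user-defined literal. */
        return 0;
    }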
diff --git a/storage/xtradb/lock/lock0lock.cc b/storage/xtradb/lock/lock0lock.cc
index 018f6f9a69a..30ad3ee7922 100644
--- a/storage/xtradb/lock/lock0lock.cc
+++ b/storage/xtradb/lock/lock0lock.cc
@@ -49,6 +49,7 @@ Created 5/7/1996 Heikki Tuuri
#include "btr0btr.h"
#include "dict0boot.h"
#include <set>
+#include "mysql/plugin.h"
#include <mysql/service_wsrep.h>
@@ -375,6 +376,11 @@ struct lock_stack_t {
ulint heap_no; /*!< heap number if rec lock */
};
+extern "C" void thd_report_wait_for(const MYSQL_THD thd, MYSQL_THD other_thd);
+extern "C" int thd_need_wait_for(const MYSQL_THD thd);
+extern "C"
+int thd_need_ordering_with(const MYSQL_THD thd, const MYSQL_THD other_thd);
+
/** Stack to use during DFS search. Currently only a single stack is required
because there is no parallel deadlock check. This stack is protected by
the lock_sys_t::mutex. */
@@ -390,6 +396,14 @@ UNIV_INTERN mysql_pfs_key_t lock_sys_mutex_key;
UNIV_INTERN mysql_pfs_key_t lock_sys_wait_mutex_key;
#endif /* UNIV_PFS_MUTEX */
+/* Buffer to collect THDs to report waits for. */
+struct thd_wait_reports {
+ struct thd_wait_reports *next; /*!< List link */
+ ulint used; /*!< How many elements in waitees[] */
+ trx_t *waitees[64]; /*!< Trxs for thd_report_wait_for() */
+};
+
+
#ifdef UNIV_DEBUG
UNIV_INTERN ibool lock_print_waits = FALSE;
@@ -1021,6 +1035,32 @@ lock_rec_has_to_wait(
return(FALSE);
}
+ if ((type_mode & LOCK_GAP || lock_rec_get_gap(lock2)) &&
+ !thd_need_ordering_with(trx->mysql_thd,
+ lock2->trx->mysql_thd)) {
+ /* If the upper server layer has already decided on the
+ commit order between the transaction requesting the
+ lock and the transaction owning the lock, we do not
+	need to wait for gap locks. Such ordering by the upper
+ server layer happens in parallel replication, where the
+ commit order is fixed to match the original order on the
+ master.
+
+ Such gap locks are mainly needed to get serialisability
+ between transactions so that they will be binlogged in
+ the correct order so that statement-based replication
+ will give the correct results. Since the right order
+ was already determined on the master, we do not need
+ to enforce it again here.
+
+ Skipping the locks is not essential for correctness,
+ since in case of deadlock we will just kill the later
+ transaction and retry it. But it can save some
+ unnecessary rollbacks and retries. */
+
+ return (FALSE);
+ }
+
#ifdef WITH_WSREP
/* if BF thread is locking and has conflict with another BF
thread, we need to look at trx ordering and lock types */
@@ -1065,6 +1105,7 @@ lock_rec_has_to_wait(
}
}
#endif /* WITH_WSREP */
+
return(TRUE);
}
@@ -4169,7 +4210,8 @@ static
trx_id_t
lock_deadlock_search(
/*=================*/
- lock_deadlock_ctx_t* ctx) /*!< in/out: deadlock context */
+ lock_deadlock_ctx_t* ctx, /*!< in/out: deadlock context */
+ struct thd_wait_reports*waitee_ptr) /*!< in/out: list of waitees */
{
const lock_t* lock;
ulint heap_no;
@@ -4250,38 +4292,65 @@ lock_deadlock_search(
/* Select the joining transaction as the victim. */
return(ctx->start->id);
- } else if (lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) {
+ } else {
+ /* We do not need to report autoinc locks to the upper
+ layer. These locks are released before commit, so they
+ can not cause deadlocks with binlog-fixed commit
+ order. */
+ if (waitee_ptr &&
+ (lock_get_type_low(lock) != LOCK_TABLE ||
+ lock_get_mode(lock) != LOCK_AUTO_INC)) {
+ if (waitee_ptr->used ==
+ sizeof(waitee_ptr->waitees) /
+ sizeof(waitee_ptr->waitees[0])) {
+ waitee_ptr->next =
+ (struct thd_wait_reports *)
+ mem_alloc(sizeof(*waitee_ptr));
+ waitee_ptr = waitee_ptr->next;
+ if (!waitee_ptr) {
+ ctx->too_deep = TRUE;
+ return(ctx->start->id);
+ }
+ waitee_ptr->next = NULL;
+ waitee_ptr->used = 0;
+ }
+ waitee_ptr->waitees[waitee_ptr->used++] = lock->trx;
+ }
+
+ if (lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) {
- /* Another trx ahead has requested a lock in an
- incompatible mode, and is itself waiting for a lock. */
+ /* Another trx ahead has requested a lock in an
+ incompatible mode, and is itself waiting for a lock. */
- ++ctx->cost;
+ ++ctx->cost;
- /* Save current search state. */
- if (!lock_deadlock_push(ctx, lock, heap_no)) {
+ /* Save current search state. */
+ if (!lock_deadlock_push(ctx, lock, heap_no)) {
- /* Unable to save current search state, stack
- size not big enough. */
+ /* Unable to save current search state, stack
+ size not big enough. */
- ctx->too_deep = TRUE;
+ ctx->too_deep = TRUE;
#ifdef WITH_WSREP
if (wsrep_thd_is_BF(ctx->start->mysql_thd, TRUE))
return(lock->trx->id);
else
#endif /* WITH_WSREP */
+
return(ctx->start->id);
- }
+ }
+
+ ctx->wait_lock = lock->trx->lock.wait_lock;
+ lock = lock_get_first_lock(ctx, &heap_no);
- ctx->wait_lock = lock->trx->lock.wait_lock;
- lock = lock_get_first_lock(ctx, &heap_no);
+ if (lock->trx->lock.deadlock_mark > ctx->mark_start) {
+ lock = lock_get_next_lock(ctx, lock, heap_no);
+ }
- if (lock->trx->lock.deadlock_mark > ctx->mark_start) {
+ } else {
lock = lock_get_next_lock(ctx, lock, heap_no);
}
-
- } else {
- lock = lock_get_next_lock(ctx, lock, heap_no);
}
}
@@ -4346,6 +4415,48 @@ lock_deadlock_trx_rollback(
trx_mutex_exit(trx);
}
+static
+void
+lock_report_waiters_to_mysql(
+/*=======================*/
+ struct thd_wait_reports* waitee_buf_ptr, /*!< in: set of trxs */
+ THD* mysql_thd, /*!< in: THD */
+ trx_id_t victim_trx_id) /*!< in: Trx selected
+ as deadlock victim, if
+ any */
+{
+ struct thd_wait_reports* p;
+ struct thd_wait_reports* q;
+ ulint i;
+
+ p = waitee_buf_ptr;
+ while (p) {
+ i = 0;
+ while (i < p->used) {
+ trx_t *w_trx = p->waitees[i];
+ /* There is no need to report waits to a trx already
+ selected as a victim. */
+ if (w_trx->id != victim_trx_id) {
+ /* If thd_report_wait_for() decides to kill the
+ transaction, then we will get a call back into
+ innobase_kill_query. We mark this by setting
+ current_lock_mutex_owner, so we can avoid trying
+ to recursively take lock_sys->mutex. */
+ w_trx->current_lock_mutex_owner = mysql_thd;
+ thd_report_wait_for(mysql_thd, w_trx->mysql_thd);
+ w_trx->current_lock_mutex_owner = NULL;
+ }
+ ++i;
+ }
+ q = p->next;
+ if (p != waitee_buf_ptr) {
+ mem_free(p);
+ }
+ p = q;
+ }
+}
+
+
/********************************************************************//**
Checks if a joining lock request results in a deadlock. If a deadlock is
found, this function will resolve the deadlock by choosing a victim transaction
@@ -4361,13 +4472,23 @@ lock_deadlock_check_and_resolve(
const lock_t* lock, /*!< in: lock the transaction is requesting */
const trx_t* trx) /*!< in: transaction */
{
- trx_id_t victim_trx_id;
+ trx_id_t victim_trx_id;
+ struct thd_wait_reports waitee_buf;
+ struct thd_wait_reports*waitee_buf_ptr;
+ THD* start_mysql_thd;
ut_ad(trx != NULL);
ut_ad(lock != NULL);
ut_ad(lock_mutex_own());
assert_trx_in_list(trx);
+ start_mysql_thd = trx->mysql_thd;
+ if (start_mysql_thd && thd_need_wait_for(start_mysql_thd)) {
+ waitee_buf_ptr = &waitee_buf;
+ } else {
+ waitee_buf_ptr = NULL;
+ }
+
/* Try and resolve as many deadlocks as possible. */
do {
lock_deadlock_ctx_t ctx;
@@ -4380,7 +4501,19 @@ lock_deadlock_check_and_resolve(
ctx.wait_lock = lock;
ctx.mark_start = lock_mark_counter;
- victim_trx_id = lock_deadlock_search(&ctx);
+ if (waitee_buf_ptr) {
+ waitee_buf_ptr->next = NULL;
+ waitee_buf_ptr->used = 0;
+ }
+
+ victim_trx_id = lock_deadlock_search(&ctx, waitee_buf_ptr);
+
+ /* Report waits to upper layer, as needed. */
+ if (waitee_buf_ptr) {
+ lock_report_waiters_to_mysql(waitee_buf_ptr,
+ start_mysql_thd,
+ victim_trx_id);
+ }
/* Search too deep, we rollback the joining transaction. */
if (ctx.too_deep) {
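(The deadlock-search changes accumulate the transactions being waited for into fixed 64-slot chunks chained into a list, with the first chunk living on the caller's stack; lock_report_waiters_to_mysql() then walks the chain, reports each entry, and frees every chunk except the head. A simplified sketch of that append/drain pattern with a generic payload; the names chunk_append and chunk_drain are invented for the illustration.)

    #include <stdlib.h>

    #define CHUNK_SLOTS 64

    struct chunk {
        struct chunk* next;            /* list link, like thd_wait_reports::next */
        unsigned      used;            /* filled slots in items[]                */
        void*         items[CHUNK_SLOTS];
    };

    /* Append one item, growing the chain when the current tail is full.
       Returns the (possibly new) tail, or NULL on allocation failure --
       the patch treats that case like a "search too deep" abort. */
    static struct chunk* chunk_append(struct chunk* tail, void* item)
    {
        if (tail->used == CHUNK_SLOTS) {
            struct chunk* c = (struct chunk*) malloc(sizeof(*c));
            if (!c) {
                return NULL;
            }
            c->next = NULL;
            c->used = 0;
            tail->next = c;
            tail = c;
        }
        tail->items[tail->used++] = item;
        return tail;
    }

    /* Visit every stored item, then free all chunks except the head. */
    static void chunk_drain(struct chunk* head, void (*visit)(void*))
    {
        struct chunk* p = head;
        while (p) {
            struct chunk* q;
            unsigned i;
            for (i = 0; i < p->used; i++) {
                visit(p->items[i]);
            }
            q = p->next;
            if (p != head) {
                free(p);
            }
            p = q;
        }
    }

As in the patch, the head chunk would be a local variable in the caller, so only the overflow chunks are heap-allocated and freed.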
diff --git a/storage/xtradb/log/log0log.cc b/storage/xtradb/log/log0log.cc
index ee39f5846ca..8c7f2b319c0 100644
--- a/storage/xtradb/log/log0log.cc
+++ b/storage/xtradb/log/log0log.cc
@@ -197,22 +197,24 @@ log_buf_pool_get_oldest_modification(void)
}
/****************************************************************//**
-Safely reads the log_sys->tracked_lsn value. Uses atomic operations
-if available, otherwise this field is protected with the log system
-mutex. The writer counterpart function is log_set_tracked_lsn() in
-log0online.c.
-
-@return log_sys->tracked_lsn value. */
-UNIV_INLINE
+Returns the oldest modified block lsn in the pool, or log_sys->lsn if none
+exists.
+@return LSN of oldest modification */
+static
lsn_t
-log_get_tracked_lsn()
+log_buf_pool_get_oldest_modification_peek(void)
+/*===========================================*/
{
-#ifdef HAVE_ATOMIC_BUILTINS_64
- return os_atomic_increment_uint64(&log_sys->tracked_lsn, 0);
-#else
- ut_ad(mutex_own(&(log_sys->mutex)));
- return log_sys->tracked_lsn;
-#endif
+ lsn_t lsn;
+
+ lsn = buf_pool_get_oldest_modification_peek();
+
+ if (!lsn) {
+
+ lsn = log_sys->lsn;
+ }
+
+ return(lsn);
}
/****************************************************************//**
@@ -639,7 +641,7 @@ log_pad_current_log_block(void)
byte b = MLOG_DUMMY_RECORD;
ulint pad_length;
ulint i;
- ib_uint64_t lsn;
+ lsn_t lsn;
/* We retrieve lsn only because otherwise gcc crashed on HP-UX */
lsn = log_reserve_and_open(OS_FILE_LOG_BLOCK_SIZE);
@@ -647,6 +649,12 @@ log_pad_current_log_block(void)
pad_length = OS_FILE_LOG_BLOCK_SIZE
- (log_sys->buf_free % OS_FILE_LOG_BLOCK_SIZE)
- LOG_BLOCK_TRL_SIZE;
+ if (pad_length
+ == (OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_HDR_SIZE
+ - LOG_BLOCK_TRL_SIZE)) {
+
+ pad_length = 0;
+ }
for (i = 0; i < pad_length; i++) {
log_write_low(&b, 1);
@@ -1347,7 +1355,7 @@ log_group_file_header_flush(
mach_write_to_4(buf + LOG_GROUP_ID, group->id);
mach_write_to_8(buf + LOG_FILE_START_LSN, start_lsn);
- /* Wipe over possible label of ibbackup --restore */
+ /* Wipe over possible label of mysqlbackup --restore */
memcpy(buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP, " ", 4);
mach_write_to_4(buf + LOG_FILE_OS_FILE_LOG_BLOCK_SIZE,
@@ -1950,6 +1958,7 @@ log_io_complete_checkpoint(void)
/* Wake the redo log watching thread to parse the log up to this
checkpoint. */
if (srv_track_changed_pages) {
+ os_event_reset(srv_redo_log_tracked_event);
os_event_set(srv_checkpoint_completed_event);
}
}
@@ -2121,7 +2130,7 @@ log_reset_first_header_and_checkpoint(
lsn = start + LOG_BLOCK_HDR_SIZE;
- /* Write the label of ibbackup --restore */
+ /* Write the label of mysqlbackup --restore */
strcpy((char*) hdr_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP,
"ibbackup ");
ut_sprintf_timestamp((char*) hdr_buf
@@ -3152,8 +3161,7 @@ void
log_archive_all(void)
/*=================*/
{
- ib_uint64_t present_lsn;
- ulint dummy;
+ lsn_t present_lsn;
mutex_enter(&(log_sys->mutex));
@@ -3170,6 +3178,9 @@ log_archive_all(void)
log_pad_current_log_block();
for (;;) {
+
+ ulint archived_bytes;
+
mutex_enter(&(log_sys->mutex));
if (present_lsn <= log_sys->archived_lsn) {
@@ -3181,7 +3192,10 @@ log_archive_all(void)
mutex_exit(&(log_sys->mutex));
- log_archive_do(TRUE, &dummy);
+ log_archive_do(TRUE, &archived_bytes);
+
+ if (archived_bytes == 0)
+ return;
}
}
@@ -3681,8 +3695,8 @@ loop:
/* Wake the log tracking thread which will then immediately
quit because of srv_shutdown_state value */
if (srv_track_changed_pages) {
+ os_event_reset(srv_redo_log_tracked_event);
os_event_set(srv_checkpoint_completed_event);
- os_event_wait(srv_redo_log_thread_finished_event);
}
fil_close_all_files();
@@ -3759,6 +3773,7 @@ loop:
/* Signal the log following thread to quit */
if (srv_track_changed_pages) {
+ os_event_reset(srv_redo_log_tracked_event);
os_event_set(srv_checkpoint_completed_event);
}
@@ -3786,10 +3801,6 @@ loop:
fil_flush_file_spaces(FIL_TABLESPACE);
}
- if (srv_track_changed_pages) {
- os_event_wait(srv_redo_log_thread_finished_event);
- }
-
fil_close_all_files();
/* Make some checks that the server really is quiet */
@@ -3885,7 +3896,7 @@ log_print(
double time_elapsed;
time_t current_time;
- mutex_enter(&(log_sys->mutex));
+ // mutex_enter(&(log_sys->mutex));
fprintf(file,
"Log sequence number " LSN_PF "\n"
@@ -3894,7 +3905,7 @@ log_print(
"Last checkpoint at " LSN_PF "\n",
log_sys->lsn,
log_sys->flushed_to_disk_lsn,
- log_buf_pool_get_oldest_modification(),
+ log_buf_pool_get_oldest_modification_peek(),
log_sys->last_checkpoint_lsn);
fprintf(file,
@@ -3904,7 +3915,7 @@ log_print(
"Checkpoint age " LSN_PF "\n",
log_sys->max_checkpoint_age,
log_sys->max_checkpoint_age_async,
- log_sys->lsn -log_buf_pool_get_oldest_modification(),
+ log_sys->lsn -log_buf_pool_get_oldest_modification_peek(),
log_sys->lsn - log_sys->last_checkpoint_lsn);
current_time = time(NULL);
@@ -3933,14 +3944,14 @@ log_print(
"Log tracking enabled\n"
"Log tracked up to " LSN_PF "\n"
"Max tracked LSN age " LSN_PF "\n",
- log_get_tracked_lsn(),
+ log_get_tracked_lsn_peek(),
log_sys->max_checkpoint_age);
}
log_sys->n_log_ios_old = log_sys->n_log_ios;
log_sys->last_printout_time = current_time;
- mutex_exit(&(log_sys->mutex));
+ //mutex_exit(&(log_sys->mutex));
}
/**********************************************************************//**
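(For the log_pad_current_log_block() change: padding is now skipped when the current block holds no payload yet. Assuming the usual InnoDB constants of 512-byte log blocks, a 12-byte block header, and a 4-byte trailer -- treat those values as assumptions of this sketch -- the arithmetic works out like this:)

    #include <stdio.h>

    int main(void)
    {
        const unsigned block = 512, hdr = 12, trl = 4;

        /* Case 1: block already has 100 bytes of payload after the header. */
        unsigned buf_free = 1024 + hdr + 100;            /* offset in log buffer */
        unsigned pad = block - (buf_free % block) - trl; /* 512 - 112 - 4 = 396  */
        printf("partially filled block: pad %u bytes\n", pad);

        /* Case 2: buf_free sits right after a fresh block header. */
        buf_free = 1024 + hdr;
        pad = block - (buf_free % block) - trl;          /* 512 - 12 - 4 = 496   */
        /* 496 == block - hdr - trl, i.e. the block is empty: the new check
           sets pad to 0 instead of writing a block full of dummy records. */
        printf("empty block: pad %u -> treated as 0\n", pad);
        return 0;
    }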
diff --git a/storage/xtradb/log/log0online.cc b/storage/xtradb/log/log0online.cc
index bc930572c09..dbd8f46caaa 100644
--- a/storage/xtradb/log/log0online.cc
+++ b/storage/xtradb/log/log0online.cc
@@ -1188,6 +1188,9 @@ log_online_write_bitmap(void)
bmp_tree_node = (ib_rbt_node_t*)
rbt_next(log_bmp_sys->modified_pages, bmp_tree_node);
+
+ DBUG_EXECUTE_IF("bitmap_page_2_write_error",
+ DBUG_SET("+d,bitmap_page_write_error"););
}
rbt_reset(log_bmp_sys->modified_pages);
@@ -1253,6 +1256,7 @@ log_online_follow_redo_log(void)
/*********************************************************************//**
Diagnose a bitmap file range setup failure and free the partially-initialized
bitmap file range. */
+UNIV_COLD
static
void
log_online_diagnose_inconsistent_dir(
@@ -1434,26 +1438,30 @@ log_online_setup_bitmap_file_range(
return FALSE;
}
-#ifdef UNIV_DEBUG
- if (!bitmap_files->files[0].seq_num) {
+ if (!bitmap_files->files[0].seq_num
+ || bitmap_files->files[0].seq_num != first_file_seq_num) {
log_online_diagnose_inconsistent_dir(bitmap_files);
return FALSE;
}
- ut_ad(bitmap_files->files[0].seq_num == first_file_seq_num);
+
{
size_t i;
for (i = 1; i < bitmap_files->count; i++) {
if (!bitmap_files->files[i].seq_num) {
break;
}
- ut_ad(bitmap_files->files[i].seq_num
- > bitmap_files->files[i - 1].seq_num);
- ut_ad(bitmap_files->files[i].start_lsn
- >= bitmap_files->files[i - 1].start_lsn);
+ if ((bitmap_files->files[i].seq_num
+ <= bitmap_files->files[i - 1].seq_num)
+ || (bitmap_files->files[i].start_lsn
+ < bitmap_files->files[i - 1].start_lsn)) {
+
+ log_online_diagnose_inconsistent_dir(
+ bitmap_files);
+ return FALSE;
+ }
}
}
-#endif
return TRUE;
}
@@ -1576,6 +1584,17 @@ log_online_bitmap_iterator_init(
{
ut_a(i);
+ if (UNIV_UNLIKELY(min_lsn > max_lsn)) {
+
+ /* Empty range */
+ i->in_files.count = 0;
+ i->in_files.files = NULL;
+ i->in.file = os_file_invalid;
+ i->page = NULL;
+ i->failed = FALSE;
+ return TRUE;
+ }
+
if (!log_online_setup_bitmap_file_range(&i->in_files, min_lsn,
max_lsn)) {
diff --git a/storage/xtradb/log/log0recv.cc b/storage/xtradb/log/log0recv.cc
index a7e3333c4fd..7e9a26ef962 100644
--- a/storage/xtradb/log/log0recv.cc
+++ b/storage/xtradb/log/log0recv.cc
@@ -67,7 +67,7 @@ Created 9/20/1997 Heikki Tuuri
/** This is set to FALSE if the backup was originally taken with the
-ibbackup --include regexp option: then we do not want to create tables in
+mysqlbackup --include regexp option: then we do not want to create tables in
directories which were not included */
UNIV_INTERN ibool recv_replay_file_ops = TRUE;
#endif /* !UNIV_HOTBACKUP */
@@ -2128,7 +2128,7 @@ recv_apply_log_recs_for_backup(void)
/* Extend the tablespace's last file if the page_no
does not fall inside its bounds; we assume the last
- file is auto-extending, and ibbackup copied the file
+ file is auto-extending, and mysqlbackup copied the file
when it still was smaller */
success = fil_extend_space_to_desired_size(
@@ -2499,10 +2499,10 @@ loop:
#ifdef UNIV_HOTBACKUP
if (recv_replay_file_ops) {
- /* In ibbackup --apply-log, replay an .ibd file
- operation, if possible; note that
- fil_path_to_mysql_datadir is set in ibbackup to
- point to the datadir we should use there */
+ /* In mysqlbackup --apply-log, replay an .ibd
+ file operation, if possible; note that
+ fil_path_to_mysql_datadir is set in mysqlbackup
+ to point to the datadir we should use there */
if (NULL == fil_op_log_parse_or_replay(
body, end_ptr, type,
@@ -3167,17 +3167,17 @@ recv_recovery_from_checkpoint_start_func(
if (srv_read_only_mode) {
ib_logf(IB_LOG_LEVEL_ERROR,
- "Cannot restore from ibbackup, InnoDB running "
- "in read-only mode!");
+ "Cannot restore from mysqlbackup, InnoDB "
+ "running in read-only mode!");
return(DB_ERROR);
}
- /* This log file was created by ibbackup --restore: print
+ /* This log file was created by mysqlbackup --restore: print
a note to the user about it */
ib_logf(IB_LOG_LEVEL_INFO,
- "The log file was created by ibbackup --apply-log "
+ "The log file was created by mysqlbackup --apply-log "
"at %s. The following crash recovery is part of a "
"normal restore.",
log_hdr_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP);
diff --git a/storage/xtradb/mysql-test/storage_engine/alter_tablespace.opt b/storage/xtradb/mysql-test/storage_engine/alter_tablespace.opt
new file mode 100644
index 00000000000..cf4b117e1b1
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/alter_tablespace.opt
@@ -0,0 +1,2 @@
+--innodb-file-per-table=1
+
diff --git a/storage/xtradb/mysql-test/storage_engine/autoinc_secondary.rdiff b/storage/xtradb/mysql-test/storage_engine/autoinc_secondary.rdiff
new file mode 100644
index 00000000000..00cda7c4435
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/autoinc_secondary.rdiff
@@ -0,0 +1,30 @@
+--- suite/storage_engine/autoinc_secondary.result 2012-07-12 04:34:18.153885986 +0400
++++ suite/storage_engine/autoinc_secondary.reject 2012-07-15 17:47:03.937703666 +0400
+@@ -13,18 +13,15 @@
+ 5 a
+ DROP TABLE t1;
+ CREATE TABLE t1 (a <CHAR_COLUMN>, b <INT_COLUMN> AUTO_INCREMENT, PRIMARY KEY (a,b)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-INSERT INTO t1 (a) VALUES ('a'),('b'),('b'),('c'),('a');
+-SELECT LAST_INSERT_ID();
+-LAST_INSERT_ID()
+-1
+-SELECT a,b FROM t1;
+-a b
+-a 1
+-a 2
+-b 1
+-b 2
+-c 1
+-DROP TABLE t1;
++ERROR 42000: Incorrect table definition; there can be only one auto column and it must be defined as a key
++# ERROR: Statement ended with errno 1075, errname ER_WRONG_AUTO_KEY (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command finished with ER_WRONG_AUTO_KEY.
++# Multi-part keys or PK or AUTO_INCREMENT (on a secondary column) or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ CREATE TABLE t1 (a <CHAR_COLUMN>, b <INT_COLUMN> AUTO_INCREMENT, PRIMARY KEY (a,b), <CUSTOM_INDEX>(b)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ INSERT INTO t1 (a) VALUES ('a'),('b'),('b'),('c'),('a');
+ SELECT LAST_INSERT_ID();
diff --git a/storage/xtradb/mysql-test/storage_engine/cache_index.rdiff b/storage/xtradb/mysql-test/storage_engine/cache_index.rdiff
new file mode 100644
index 00000000000..e04df87aa34
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/cache_index.rdiff
@@ -0,0 +1,71 @@
+--- suite/storage_engine/cache_index.result 2012-07-15 00:22:19.822493731 +0400
++++ suite/storage_engine/cache_index.reject 2012-07-15 17:47:18.321522834 +0400
+@@ -12,31 +12,31 @@
+ SET GLOBAL <CACHE_NAME>.key_buffer_size=128*1024;
+ CACHE INDEX t1 INDEX (a), t2 IN <CACHE_NAME>;
+ Table Op Msg_type Msg_text
+-test.t1 assign_to_keycache status OK
+-test.t2 assign_to_keycache status OK
++test.t1 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache
++test.t2 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache
+ LOAD INDEX INTO CACHE t1, t2;
+ Table Op Msg_type Msg_text
+-test.t1 preload_keys status OK
+-test.t2 preload_keys status OK
++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
++test.t2 preload_keys note The storage engine for the table doesn't support preload_keys
+ INSERT INTO t1 (a,b) VALUES (3,'c'),(4,'d');
+ SET GLOBAL <CACHE_NAME>.key_buffer_size=8*1024;
+ LOAD INDEX INTO CACHE t1, t2 IGNORE LEAVES;
+ Table Op Msg_type Msg_text
+-test.t1 preload_keys status OK
+-test.t2 preload_keys status OK
++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
++test.t2 preload_keys note The storage engine for the table doesn't support preload_keys
+ SET GLOBAL <CACHE_NAME>.key_cache_age_threshold = 100, <CACHE_NAME>.key_cache_block_size = 512, <CACHE_NAME>.key_cache_division_limit = 1, <CACHE_NAME>.key_cache_segments=2;
+ INSERT INTO t1 (a,b) VALUES (5,'e'),(6,'f');
+ LOAD INDEX INTO CACHE t1;
+ Table Op Msg_type Msg_text
+-test.t1 preload_keys status OK
++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
+ SET GLOBAL new_<CACHE_NAME>.key_buffer_size=128*1024;
+ CACHE INDEX t1 IN new_<CACHE_NAME>;
+ Table Op Msg_type Msg_text
+-test.t1 assign_to_keycache status OK
++test.t1 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache
+ INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h');
+ LOAD INDEX INTO CACHE t1 IGNORE LEAVES;
+ Table Op Msg_type Msg_text
+-test.t1 preload_keys status OK
++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
+ INSERT INTO t1 (a,b) VALUES (9,'i');
+ DROP TABLE t2;
+ DROP TABLE t1;
+@@ -47,11 +47,11 @@
+ ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ CACHE INDEX t1 IN <CACHE_NAME>;
+ Table Op Msg_type Msg_text
+-test.t1 assign_to_keycache status OK
++test.t1 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+ LOAD INDEX INTO CACHE t1;
+ Table Op Msg_type Msg_text
+-test.t1 preload_keys status OK
++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
+ DROP TABLE t1;
+ CREATE TABLE t1 (a <INT_COLUMN>,
+ b <CHAR_COLUMN>,
+@@ -59,11 +59,11 @@
+ ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ CACHE INDEX t1 IN <CACHE_NAME>;
+ Table Op Msg_type Msg_text
+-test.t1 assign_to_keycache status OK
++test.t1 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+ LOAD INDEX INTO CACHE t1;
+ Table Op Msg_type Msg_text
+-test.t1 preload_keys status OK
++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys
+ DROP TABLE t1;
+ SET GLOBAL <CACHE_NAME>.key_buffer_size=0;
+ SET GLOBAL new_<CACHE_NAME>.key_buffer_size=0;
diff --git a/storage/xtradb/mysql-test/storage_engine/checksum_table_live.rdiff b/storage/xtradb/mysql-test/storage_engine/checksum_table_live.rdiff
new file mode 100644
index 00000000000..71c782848a6
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/checksum_table_live.rdiff
@@ -0,0 +1,13 @@
+--- suite/storage_engine/checksum_table_live.result 2012-07-12 21:05:44.497062968 +0400
++++ suite/storage_engine/checksum_table_live.reject 2012-07-15 17:47:28.105399836 +0400
+@@ -11,8 +11,8 @@
+ test.t1 4272806499
+ CHECKSUM TABLE t1, t2 QUICK;
+ Table Checksum
+-test.t1 4272806499
+-test.t2 0
++test.t1 NULL
++test.t2 NULL
+ CHECKSUM TABLE t1, t2 EXTENDED;
+ Table Checksum
+ test.t1 4272806499
diff --git a/storage/xtradb/mysql-test/storage_engine/col_opt_not_null.opt b/storage/xtradb/mysql-test/storage_engine/col_opt_not_null.opt
new file mode 100644
index 00000000000..40445305fc6
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/col_opt_not_null.opt
@@ -0,0 +1 @@
+--innodb_log_file_size=100M
diff --git a/storage/xtradb/mysql-test/storage_engine/col_opt_null.opt b/storage/xtradb/mysql-test/storage_engine/col_opt_null.opt
new file mode 100644
index 00000000000..40445305fc6
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/col_opt_null.opt
@@ -0,0 +1 @@
+--innodb_log_file_size=100M
diff --git a/storage/xtradb/mysql-test/storage_engine/define_engine.inc b/storage/xtradb/mysql-test/storage_engine/define_engine.inc
new file mode 100644
index 00000000000..77e384d2351
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/define_engine.inc
@@ -0,0 +1,49 @@
+###########################################
+#
+# This is a template of the include file define_engine.inc which
+# should be placed in storage/<engine>/mysql-test/storage_engine folder.
+#
+################################
+#
+# The name of the engine under test must be defined in $ENGINE variable.
+# You can set it either here (uncomment and edit) or in your environment.
+#
+let $ENGINE = InnoDB;
+#
+################################
+#
+# The following three variables define specific options for columns and tables.
+# Normally there should be none needed, but for some engines it can be different.
+# If the engine requires specific column option for all or indexed columns,
+# set them inside the comment, e.g. /*!NOT NULL*/.
+# Do the same for table options if needed, e.g. /*!INSERT_METHOD=LAST*/
+
+let $default_col_opts = /*!*/;
+let $default_col_indexed_opts = /*!*/;
+let $default_tbl_opts = /*!*/;
+
+# INDEX, UNIQUE INDEX, PRIMARY KEY, special index type - choose the first that the engine allows,
+# or set it to /*!*/ if none is supported
+
+let $default_index = /*!INDEX*/;
+
+# If the engine does not support the following types, replace them with the closest possible
+
+let $default_int_type = INT(11);
+let $default_char_type = CHAR(8);
+
+################################
+
+--disable_query_log
+--disable_result_log
+
+# Here you can place your custom MTR code which needs to be executed before each test,
+# e.g. creation of an additional schema or table, etc.
+# The cleanup part should be defined in cleanup_engine.inc
+
+CALL mtr.add_suppression("InnoDB: Resizing redo log from .* to .* pages, LSN=.*");
+CALL mtr.add_suppression("InnoDB: Starting to delete and rewrite log files.");
+CALL mtr.add_suppression("InnoDB: New log files created, LSN=.*");
+
+--enable_query_log
+--enable_result_log
diff --git a/storage/xtradb/mysql-test/storage_engine/disabled.def b/storage/xtradb/mysql-test/storage_engine/disabled.def
new file mode 100644
index 00000000000..2f3793047f4
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/disabled.def
@@ -0,0 +1,8 @@
+autoinc_vars : MySQL:65225 (InnoDB miscalculates auto-increment)
+tbl_opt_ai : MySQL:65901 (AUTO_INCREMENT option on InnoDB table is ignored if added before autoinc column)
+delete_low_prio : InnoDB does not use table-level locking
+insert_high_prio : InnoDB does not use table-level locking
+insert_low_prio : InnoDB does not use table-level locking
+select_high_prio : InnoDB does not use table-level locking
+update_low_prio : InnoDB does not use table-level locking
+
diff --git a/storage/xtradb/mysql-test/storage_engine/fulltext_search.rdiff b/storage/xtradb/mysql-test/storage_engine/fulltext_search.rdiff
new file mode 100644
index 00000000000..a68fe83070e
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/fulltext_search.rdiff
@@ -0,0 +1,49 @@
+--- suite/storage_engine/fulltext_search.result 2013-11-27 18:50:16.000000000 +0400
++++ suite/storage_engine/fulltext_search.reject 2014-02-05 15:33:26.000000000 +0400
+@@ -52,15 +52,14 @@
+ INSERT INTO t1 (v0,v1,v2) VALUES ('text4','Contributing more...','...is a good idea'),('text5','test','test');
+ SELECT v0, MATCH(v1) AGAINST('contributing') AS rating FROM t1 WHERE MATCH(v1) AGAINST ('contributing');
+ v0 rating
+-text4 1.3705332279205322
++text4 0.4885590672492981
+ SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('-test1 +critical +Cook*' IN BOOLEAN MODE);
+-v0
+-text1
++ERROR HY000: Can't find FULLTEXT index matching the column list
+ SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('-patch +critical +Cook*' IN BOOLEAN MODE);
+-v0
++ERROR HY000: Can't find FULLTEXT index matching the column list
+ SELECT v0, MATCH(v1) AGAINST('database' WITH QUERY EXPANSION) AS rating FROM t1 WHERE MATCH(v1) AGAINST ('database' WITH QUERY EXPANSION);
+ v0 rating
+-text1 178.11756896972656
++text1 151.4530487060547
+ DROP TABLE t1;
+ CREATE TABLE t1 (v0 VARCHAR(64) <CUSTOM_COL_OPTIONS>,
+ v1 VARCHAR(16384) <CUSTOM_COL_OPTIONS>,
+@@ -112,14 +111,15 @@
+ ), ('text2','test1','test2');
+ SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('contributing' IN NATURAL LANGUAGE MODE);
+ v0
++text1
+ INSERT INTO t1 (v0,v1,v2) VALUES ('text3','test','test');
+ SELECT v0, MATCH(v1,v2) AGAINST('contributing' IN NATURAL LANGUAGE MODE) AS rating FROM t1 WHERE MATCH(v1,v2) AGAINST ('contributing' IN NATURAL LANGUAGE MODE);
+ v0 rating
+-text1 0.2809644043445587
++text1 0.45528939366340637
+ INSERT INTO t1 (v0,v1,v2) VALUES ('text4','Contributing more...','...is a good idea'),('text5','test','test');
+ SELECT v0, MATCH(v1) AGAINST('contributing') AS rating FROM t1 WHERE MATCH(v1) AGAINST ('contributing');
+ v0 rating
+-text4 1.3705332279205322
++text4 0.4885590672492981
+ SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('-test1 +critical +Cook*' IN BOOLEAN MODE);
+ v0
+ text1
+@@ -127,6 +127,6 @@
+ v0
+ SELECT v0, MATCH(v1,v2) AGAINST('database' WITH QUERY EXPANSION) AS rating FROM t1 WHERE MATCH(v1,v2) AGAINST ('database' WITH QUERY EXPANSION);
+ v0 rating
+-text1 190.56150817871094
+-text4 1.1758291721343994
++text1 229.60874938964844
++text4 0.31671249866485596
+ DROP TABLE t1;
diff --git a/storage/xtradb/mysql-test/storage_engine/index_enable_disable.rdiff b/storage/xtradb/mysql-test/storage_engine/index_enable_disable.rdiff
new file mode 100644
index 00000000000..f8e812e7edb
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/index_enable_disable.rdiff
@@ -0,0 +1,33 @@
+--- suite/storage_engine/index_enable_disable.result 2012-07-15 00:30:05.296641931 +0400
++++ suite/storage_engine/index_enable_disable.reject 2012-07-15 17:49:12.988081281 +0400
+@@ -11,15 +11,19 @@
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ t1 1 a 1 a # # NULL NULL YES BTREE
+ ALTER TABLE t1 DISABLE KEYS;
++Warnings:
++Note 1031 Storage engine <STORAGE_ENGINE> of the table `test`.`t1` doesn't have this option
+ SHOW INDEX IN t1;
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 1 a 1 a # # NULL NULL YES BTREE disabled
++t1 1 a 1 a # # NULL NULL YES BTREE
+ EXPLAIN SELECT a FROM t1 ORDER BY a;
+ id select_type table type possible_keys key key_len ref rows Extra
+-1 SIMPLE t1 ALL NULL NULL NULL NULL 19 Using filesort
++1 SIMPLE t1 index NULL a 5 NULL 19 Using index
+ INSERT INTO t1 (a) VALUES
+ (11),(12),(13),(14),(15),(16),(17),(18),(19),(20);
+ ALTER TABLE t1 ENABLE KEYS;
++Warnings:
++Note 1031 Storage engine <STORAGE_ENGINE> of the table `test`.`t1` doesn't have this option
+ SHOW INDEX IN t1;
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+ t1 1 a 1 a # # NULL NULL YES BTREE
+@@ -32,6 +36,8 @@
+ (1),(2),(3),(4),(5),(6),(7),(8),(9),
+ (21),(22),(23),(24),(25),(26),(27),(28),(29);
+ ALTER TABLE t1 DISABLE KEYS;
++Warnings:
++Note 1031 Storage engine <STORAGE_ENGINE> of the table `test`.`t1` doesn't have this option
+ INSERT INTO t1 (a) VALUES (29);
+ ERROR 23000: Duplicate entry '29' for key 'a'
+ # Statement ended with one of expected results (ER_DUP_ENTRY,ER_DUP_KEY).
diff --git a/storage/xtradb/mysql-test/storage_engine/index_type_hash.rdiff b/storage/xtradb/mysql-test/storage_engine/index_type_hash.rdiff
new file mode 100644
index 00000000000..02f9d93588f
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/index_type_hash.rdiff
@@ -0,0 +1,60 @@
+--- suite/storage_engine/index_type_hash.result 2012-07-15 01:10:17.919128889 +0400
++++ suite/storage_engine/index_type_hash.reject 2012-07-15 17:49:26.135915989 +0400
+@@ -4,7 +4,7 @@
+ ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ SHOW KEYS IN t1;
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 1 a 1 a # # NULL NULL # HASH
++t1 1 a 1 a # # NULL NULL # BTREE
+ DROP TABLE t1;
+ CREATE TABLE t1 (a <INT_COLUMN>,
+ b <CHAR_COLUMN>,
+@@ -12,8 +12,8 @@
+ ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ SHOW KEYS IN t1;
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 1 a_b 1 a # # NULL NULL # HASH a_b index
+-t1 1 a_b 2 b # # NULL NULL # HASH a_b index
++t1 1 a_b 1 a # # NULL NULL # BTREE a_b index
++t1 1 a_b 2 b # # NULL NULL # BTREE a_b index
+ DROP TABLE t1;
+ CREATE TABLE t1 (a <INT_COLUMN>,
+ b <CHAR_COLUMN>,
+@@ -22,8 +22,8 @@
+ ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ SHOW KEYS IN t1;
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 1 a 1 a # # NULL NULL # HASH
+-t1 1 b 1 b # # NULL NULL # HASH
++t1 1 a 1 a # # NULL NULL # BTREE
++t1 1 b 1 b # # NULL NULL # BTREE
+ DROP TABLE t1;
+ CREATE TABLE t1 (a <INT_COLUMN>,
+ b <CHAR_COLUMN>,
+@@ -31,7 +31,7 @@
+ ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ SHOW KEYS IN t1;
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 0 a 1 a # # NULL NULL # HASH
++t1 0 a 1 a # # NULL NULL # BTREE
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+ INSERT INTO t1 (a,b) VALUES (1,'c');
+ ERROR 23000: Duplicate entry '1' for key 'a'
+@@ -43,7 +43,7 @@
+ ALTER TABLE t1 ADD <CUSTOM_INDEX> (a) USING HASH COMMENT 'simple index on a';
+ SHOW INDEX FROM t1;
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 1 a 1 a # # NULL NULL # HASH simple index on a
++t1 1 a 1 a # # NULL NULL # BTREE simple index on a
+ ALTER TABLE t1 DROP KEY a;
+ DROP TABLE t1;
+ CREATE TABLE t1 (a <INT_COLUMN>,
+@@ -52,7 +52,7 @@
+ ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ SHOW KEYS IN t1;
+ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+-t1 0 a 1 a # # NULL NULL # HASH
++t1 0 a 1 a # # NULL NULL # BTREE
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b');
+ INSERT INTO t1 (a,b) VALUES (1,'c');
+ ERROR 23000: Duplicate entry '1' for key 'a'
diff --git a/storage/xtradb/mysql-test/storage_engine/insert_delayed.rdiff b/storage/xtradb/mysql-test/storage_engine/insert_delayed.rdiff
new file mode 100644
index 00000000000..9e6cddf03f0
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/insert_delayed.rdiff
@@ -0,0 +1,26 @@
+--- suite/storage_engine/insert_delayed.result 2013-01-23 01:23:49.461254916 +0400
++++ suite/storage_engine/insert_delayed.reject 2013-01-23 01:47:05.975698364 +0400
+@@ -5,7 +5,16 @@
+ connect con0,localhost,root,,;
+ SET lock_wait_timeout = 1;
+ INSERT DELAYED INTO t1 (a,b) VALUES (3,'c');
++ERROR HY000: DELAYED option not supported for table 't1'
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command finished with ER_DELAYED_NOT_SUPPORTED.
++# INSERT DELAYED or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ INSERT DELAYED INTO t1 SET a=4, b='d';
++ERROR HY000: DELAYED option not supported for table 't1'
+ INSERT DELAYED INTO t1 (a,b) SELECT 5, 'e';
+ ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+ disconnect con0;
+@@ -20,6 +29,4 @@
+ a b
+ 1 f
+ 2 b
+-3 c
+-4 d
+ DROP TABLE t1;
diff --git a/storage/xtradb/mysql-test/storage_engine/lock_concurrent.rdiff b/storage/xtradb/mysql-test/storage_engine/lock_concurrent.rdiff
new file mode 100644
index 00000000000..fe4a0087fa9
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/lock_concurrent.rdiff
@@ -0,0 +1,22 @@
+--- suite/storage_engine/lock_concurrent.result 2012-06-24 23:55:19.539380000 +0400
++++ suite/storage_engine/lock_concurrent.reject 2012-07-15 17:50:21.279222746 +0400
+@@ -3,10 +3,19 @@
+ LOCK TABLES t1 WRITE CONCURRENT, t1 AS t2 READ;
+ SET lock_wait_timeout = 1;
+ LOCK TABLES t1 READ LOCAL;
++ERROR HY000: Lock wait timeout exceeded; try restarting transaction
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command finished with ER_LOCK_WAIT_TIMEOUT.
++# LOCK .. WRITE CONCURRENT or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ UNLOCK TABLES;
+ UNLOCK TABLES;
+ LOCK TABLES t1 READ LOCAL;
+ LOCK TABLES t1 WRITE CONCURRENT, t1 AS t2 READ;
++ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+ UNLOCK TABLES;
+ UNLOCK TABLES;
+ DROP TABLE t1;
diff --git a/storage/xtradb/mysql-test/storage_engine/optimize_table.rdiff b/storage/xtradb/mysql-test/storage_engine/optimize_table.rdiff
new file mode 100644
index 00000000000..54d1f600516
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/optimize_table.rdiff
@@ -0,0 +1,37 @@
+--- suite/storage_engine/optimize_table.result 2012-07-12 19:13:53.741428591 +0400
++++ suite/storage_engine/optimize_table.reject 2012-07-15 17:50:30.843102510 +0400
+@@ -5,25 +5,32 @@
+ INSERT INTO t1 (a,b) VALUES (3,'c'),(4,'d');
+ OPTIMIZE TABLE t1;
+ Table Op Msg_type Msg_text
++test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+ test.t1 optimize status OK
+ INSERT INTO t2 (a,b) VALUES (4,'d');
+ OPTIMIZE NO_WRITE_TO_BINLOG TABLE t2;
+ Table Op Msg_type Msg_text
++test.t2 optimize note Table does not support optimize, doing recreate + analyze instead
+ test.t2 optimize status OK
+ INSERT INTO t2 (a,b) VALUES (5,'e');
+ INSERT INTO t1 (a,b) VALUES (6,'f');
+ OPTIMIZE LOCAL TABLE t1, t2;
+ Table Op Msg_type Msg_text
++test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+ test.t1 optimize status OK
++test.t2 optimize note Table does not support optimize, doing recreate + analyze instead
+ test.t2 optimize status OK
+ OPTIMIZE TABLE t1, t2;
+ Table Op Msg_type Msg_text
+-test.t1 optimize status Table is already up to date
+-test.t2 optimize status Table is already up to date
++test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
++test.t1 optimize status OK
++test.t2 optimize note Table does not support optimize, doing recreate + analyze instead
++test.t2 optimize status OK
+ DROP TABLE t1, t2;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(100,'b'),(2,'c'),(3,'d');
+ OPTIMIZE TABLE t1;
+ Table Op Msg_type Msg_text
++test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+ test.t1 optimize status OK
+ DROP TABLE t1;
diff --git a/storage/xtradb/mysql-test/storage_engine/parts/checksum_table.rdiff b/storage/xtradb/mysql-test/storage_engine/parts/checksum_table.rdiff
new file mode 100644
index 00000000000..c8aabb787e9
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/parts/checksum_table.rdiff
@@ -0,0 +1,13 @@
+--- suite/storage_engine/parts/checksum_table.result 2013-11-08 22:30:34.000000000 +0400
++++ suite/storage_engine/parts/checksum_table.reject 2013-11-08 22:32:30.000000000 +0400
+@@ -31,8 +31,8 @@
+ test.t1 4272806499
+ CHECKSUM TABLE t1, t2 QUICK;
+ Table Checksum
+-test.t1 4272806499
+-test.t2 0
++test.t1 NULL
++test.t2 NULL
+ CHECKSUM TABLE t1, t2 EXTENDED;
+ Table Checksum
+ test.t1 4272806499
diff --git a/storage/xtradb/mysql-test/storage_engine/parts/create_table.rdiff b/storage/xtradb/mysql-test/storage_engine/parts/create_table.rdiff
new file mode 100644
index 00000000000..0df91c6fc6e
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/parts/create_table.rdiff
@@ -0,0 +1,20 @@
+--- suite/storage_engine/parts/create_table.result 2012-07-12 21:56:38.618667460 +0400
++++ suite/storage_engine/parts/create_table.reject 2012-07-15 20:06:43.496358345 +0400
+@@ -65,7 +65,7 @@
+ 1 SIMPLE t1 abc,def # # # # # # #
+ EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a = 100;
+ id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE NULL NULL # # # # # # #
++1 SIMPLE t1 def # # # # # # #
+ INSERT INTO t1 (a) VALUES (50);
+ ERROR HY000: Table has no partition for value 50
+ DROP TABLE t1;
+@@ -81,7 +81,7 @@
+ 1 SIMPLE t1 abc_abcsp0,def_defsp0 # # # # # # #
+ EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a = 100;
+ id select_type table partitions type possible_keys key key_len ref rows Extra
+-1 SIMPLE NULL NULL # # # # # # #
++1 SIMPLE t1 def_defsp0 # # # # # # #
+ SELECT TABLE_SCHEMA, TABLE_NAME, PARTITION_NAME, SUBPARTITION_NAME, PARTITION_METHOD, SUBPARTITION_METHOD
+ FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_NAME = 't1';
+ TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_METHOD SUBPARTITION_METHOD
diff --git a/storage/xtradb/mysql-test/storage_engine/parts/disabled.def b/storage/xtradb/mysql-test/storage_engine/parts/disabled.def
new file mode 100644
index 00000000000..796bdfc751b
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/parts/disabled.def
@@ -0,0 +1 @@
+repair_table : InnoDB of 5.6.10 does not support repair on partitioned tables (fixed by 5.6.14)
diff --git a/storage/xtradb/mysql-test/storage_engine/parts/optimize_table.rdiff b/storage/xtradb/mysql-test/storage_engine/parts/optimize_table.rdiff
new file mode 100644
index 00000000000..a35ba5167d9
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/parts/optimize_table.rdiff
@@ -0,0 +1,58 @@
+--- suite/storage_engine/parts/optimize_table.result 2013-07-18 22:55:38.000000000 +0400
++++ suite/storage_engine/parts/optimize_table.reject 2013-08-05 19:45:19.000000000 +0400
+@@ -9,18 +9,22 @@
+ INSERT INTO t1 (a,b) VALUES (3,'c'),(4,'d');
+ ALTER TABLE t1 OPTIMIZE PARTITION p1;
+ Table Op Msg_type Msg_text
++test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
+ test.t1 optimize status OK
+ INSERT INTO t2 (a,b) VALUES (4,'d');
+ ALTER TABLE t2 OPTIMIZE PARTITION p0 NO_WRITE_TO_BINLOG;
+ Table Op Msg_type Msg_text
++test.t2 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
+ test.t2 optimize status OK
+ INSERT INTO t1 (a,b) VALUES (6,'f');
+ ALTER TABLE t1 OPTIMIZE PARTITION ALL LOCAL;
+ Table Op Msg_type Msg_text
++test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
+ test.t1 optimize status OK
+ INSERT INTO t2 (a,b) VALUES (5,'e');
+ ALTER TABLE t2 OPTIMIZE PARTITION p1,p0;
+ Table Op Msg_type Msg_text
++test.t2 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
+ test.t2 optimize status OK
+ DROP TABLE t1, t2;
+ DROP TABLE IF EXISTS t1,t2;
+@@ -30,25 +34,32 @@
+ INSERT INTO t1 (a,b) VALUES (3,'c'),(4,'d');
+ OPTIMIZE TABLE t1;
+ Table Op Msg_type Msg_text
++test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+ test.t1 optimize status OK
+ INSERT INTO t2 (a,b) VALUES (4,'d');
+ OPTIMIZE NO_WRITE_TO_BINLOG TABLE t2;
+ Table Op Msg_type Msg_text
++test.t2 optimize note Table does not support optimize, doing recreate + analyze instead
+ test.t2 optimize status OK
+ INSERT INTO t2 (a,b) VALUES (5,'e');
+ INSERT INTO t1 (a,b) VALUES (6,'f');
+ OPTIMIZE LOCAL TABLE t1, t2;
+ Table Op Msg_type Msg_text
++test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+ test.t1 optimize status OK
++test.t2 optimize note Table does not support optimize, doing recreate + analyze instead
+ test.t2 optimize status OK
+ OPTIMIZE TABLE t1, t2;
+ Table Op Msg_type Msg_text
++test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+ test.t1 optimize status OK
++test.t2 optimize note Table does not support optimize, doing recreate + analyze instead
+ test.t2 optimize status OK
+ DROP TABLE t1, t2;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2;
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(100,'b'),(2,'c'),(3,'d');
+ OPTIMIZE TABLE t1;
+ Table Op Msg_type Msg_text
++test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+ test.t1 optimize status OK
+ DROP TABLE t1;
diff --git a/storage/xtradb/mysql-test/storage_engine/parts/repair_table.rdiff b/storage/xtradb/mysql-test/storage_engine/parts/repair_table.rdiff
new file mode 100644
index 00000000000..35b150e82d1
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/parts/repair_table.rdiff
@@ -0,0 +1,158 @@
+--- suite/storage_engine/parts/repair_table.result 2013-07-18 22:55:38.000000000 +0400
++++ suite/storage_engine/parts/repair_table.reject 2013-08-05 19:54:09.000000000 +0400
+@@ -25,7 +25,7 @@
+ INSERT INTO t1 (a,b) VALUES (10,'j');
+ ALTER TABLE t1 REPAIR PARTITION p1 QUICK USE_FRM;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
++test.t1 repair note The storage engine for the table doesn't support repair
+ INSERT INTO t2 (a,b) VALUES (12,'l');
+ ALTER TABLE t2 REPAIR PARTITION NO_WRITE_TO_BINLOG ALL QUICK EXTENDED USE_FRM;
+ Table Op Msg_type Msg_text
+@@ -58,8 +58,8 @@
+ INSERT INTO t2 (a,b) VALUES (11,'k');
+ REPAIR TABLE t1, t2 QUICK USE_FRM;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
+-test.t2 repair status OK
++test.t1 repair note The storage engine for the table doesn't support repair
++test.t2 repair note The storage engine for the table doesn't support repair
+ INSERT INTO t1 (a,b) VALUES (12,'l');
+ INSERT INTO t2 (a,b) VALUES (13,'m');
+ REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2 QUICK EXTENDED USE_FRM;
+@@ -101,119 +101,13 @@
+ INSERT INTO t1 (a,b) VALUES (10,'j');
+ REPAIR TABLE t1 USE_FRM;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
+-t1#P#p0.MYD
+-t1#P#p0.MYI
+-t1#P#p1.MYD
+-t1#P#p1.MYI
++test.t1 repair note The storage engine for the table doesn't support repair
+ t1.frm
+ t1.par
+ INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
+ # Statement ended with one of expected results (0,144).
+ # If you got a difference in error message, just add it to rdiff file
+ FLUSH TABLE t1;
+-Restoring <DATADIR>/test/t1#P#p0.MYD
+-CHECK TABLE t1;
+-Table Op Msg_type Msg_text
+-test.t1 check error Size of datafile is: 26 Should be: 39
+-test.t1 check error Partition p0 returned error
+-test.t1 check error Corrupt
+-SELECT a,b FROM t1;
+-a b
+-8 h
+-10 j
+-7 g
+-15 o
+-Warnings:
+-Error 145 Table './test/t1#P#p0' is marked as crashed and should be repaired
+-Error 1194 Table 't1' is marked as crashed and should be repaired
+-Error 1034 Number of rows changed from 3 to 2
+-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
+-# If you got a difference in error message, just add it to rdiff file
+-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
+-# Statement ended with one of expected results (0,144).
+-# If you got a difference in error message, just add it to rdiff file
+-FLUSH TABLE t1;
+-Restoring <DATADIR>/test/t1#P#p0.MYI
+-CHECK TABLE t1;
+-Table Op Msg_type Msg_text
+-test.t1 check warning Size of datafile is: 39 Should be: 26
+-test.t1 check error Record-count is not ok; is 3 Should be: 2
+-test.t1 check warning Found 3 key parts. Should be: 2
+-test.t1 check error Partition p0 returned error
+-test.t1 check error Corrupt
+-SELECT a,b FROM t1;
+-a b
+-8 h
+-10 j
+-14 n
+-7 g
+-15 o
+-15 o
+-Warnings:
+-Error 145 Table './test/t1#P#p0' is marked as crashed and should be repaired
+-Error 1194 Table 't1' is marked as crashed and should be repaired
+-Error 1034 Number of rows changed from 2 to 3
+-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
+-# If you got a difference in error message, just add it to rdiff file
+-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
+-# Statement ended with one of expected results (0,144).
+-# If you got a difference in error message, just add it to rdiff file
+-FLUSH TABLE t1;
+-Restoring <DATADIR>/test/t1#P#p1.MYD
+-CHECK TABLE t1;
+-Table Op Msg_type Msg_text
+-test.t1 check error Size of datafile is: 39 Should be: 52
+-test.t1 check error Partition p1 returned error
+-test.t1 check error Corrupt
+-SELECT a,b FROM t1;
+-a b
+-8 h
+-10 j
+-14 n
+-14 n
+-7 g
+-15 o
+-15 o
+-Warnings:
+-Error 145 Table './test/t1#P#p1' is marked as crashed and should be repaired
+-Error 1194 Table 't1' is marked as crashed and should be repaired
+-Error 1034 Number of rows changed from 4 to 3
+-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
+-# If you got a difference in error message, just add it to rdiff file
+-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
+-# Statement ended with one of expected results (0,144).
+-# If you got a difference in error message, just add it to rdiff file
+-FLUSH TABLE t1;
+-Restoring <DATADIR>/test/t1#P#p1.MYI
+-CHECK TABLE t1;
+-Table Op Msg_type Msg_text
+-test.t1 check warning Size of datafile is: 52 Should be: 39
+-test.t1 check error Record-count is not ok; is 4 Should be: 3
+-test.t1 check warning Found 4 key parts. Should be: 3
+-test.t1 check error Partition p1 returned error
+-test.t1 check error Corrupt
+-SELECT a,b FROM t1;
+-a b
+-8 h
+-10 j
+-14 n
+-14 n
+-14 n
+-7 g
+-15 o
+-15 o
+-15 o
+-Warnings:
+-Error 145 Table './test/t1#P#p1' is marked as crashed and should be repaired
+-Error 1194 Table 't1' is marked as crashed and should be repaired
+-Error 1034 Number of rows changed from 3 to 4
+-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
+-# If you got a difference in error message, just add it to rdiff file
+-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
+-# Statement ended with one of expected results (0,144).
+-# If you got a difference in error message, just add it to rdiff file
+-FLUSH TABLE t1;
+ Restoring <DATADIR>/test/t1.par
+ CHECK TABLE t1;
+ Table Op Msg_type Msg_text
+@@ -223,14 +117,8 @@
+ 8 h
+ 10 j
+ 14 n
+-14 n
+-14 n
+-14 n
+ 7 g
+ 15 o
+-15 o
+-15 o
+-15 o
+ # Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
+ # If you got a difference in error message, just add it to rdiff file
+ DROP TABLE t1;
diff --git a/storage/xtradb/mysql-test/storage_engine/parts/suite.opt b/storage/xtradb/mysql-test/storage_engine/parts/suite.opt
new file mode 100644
index 00000000000..66f581b56d0
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/parts/suite.opt
@@ -0,0 +1,2 @@
+--innodb
+
diff --git a/storage/xtradb/mysql-test/storage_engine/repair_table.rdiff b/storage/xtradb/mysql-test/storage_engine/repair_table.rdiff
new file mode 100644
index 00000000000..be3709c5833
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/repair_table.rdiff
@@ -0,0 +1,138 @@
+--- suite/storage_engine/repair_table.result 2013-10-03 20:35:06.000000000 +0400
++++ suite/storage_engine/repair_table.reject 2013-11-08 22:04:22.000000000 +0400
+@@ -4,56 +4,57 @@
+ CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ REPAIR TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
++test.t1 repair note The storage engine for the table doesn't support repair
+ INSERT INTO t1 (a,b) VALUES (3,'c');
+ INSERT INTO t2 (a,b) VALUES (4,'d');
+ REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
+-test.t2 repair status OK
++test.t1 repair note The storage engine for the table doesn't support repair
++test.t2 repair note The storage engine for the table doesn't support repair
+ INSERT INTO t2 (a,b) VALUES (5,'e'),(6,'f');
+ REPAIR LOCAL TABLE t2;
+ Table Op Msg_type Msg_text
+-test.t2 repair status OK
++test.t2 repair note The storage engine for the table doesn't support repair
+ INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h');
+ INSERT INTO t2 (a,b) VALUES (9,'i');
+ REPAIR LOCAL TABLE t2, t1 EXTENDED;
+ Table Op Msg_type Msg_text
+-test.t2 repair status OK
+-test.t1 repair status OK
++test.t2 repair note The storage engine for the table doesn't support repair
++test.t1 repair note The storage engine for the table doesn't support repair
+ INSERT INTO t1 (a,b) VALUES (10,'j');
+ INSERT INTO t2 (a,b) VALUES (11,'k');
+ REPAIR TABLE t1, t2 QUICK USE_FRM;
+ Table Op Msg_type Msg_text
+-test.t1 repair warning Number of rows changed from 0 to 6
+-test.t1 repair status OK
+-test.t2 repair warning Number of rows changed from 0 to 5
+-test.t2 repair status OK
++test.t1 repair note The storage engine for the table doesn't support repair
++test.t2 repair note The storage engine for the table doesn't support repair
+ INSERT INTO t1 (a,b) VALUES (12,'l');
+ INSERT INTO t2 (a,b) VALUES (13,'m');
+ REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2 QUICK EXTENDED USE_FRM;
+ Table Op Msg_type Msg_text
+-test.t1 repair warning Number of rows changed from 0 to 7
+-test.t1 repair status OK
+-test.t2 repair warning Number of rows changed from 0 to 6
+-test.t2 repair status OK
++test.t1 repair note The storage engine for the table doesn't support repair
++test.t2 repair note The storage engine for the table doesn't support repair
+ FLUSH TABLE t1;
+ INSERT INTO t1 (a,b) VALUES (14,'n');
+-ERROR HY000: Incorrect file format 't1'
+ # Statement ended with one of expected results (0,130,ER_FAILED_READ_FROM_PAR_FILE,ER_OPEN_AS_READONLY).
+ # If you got a difference in error message, just add it to rdiff file
+ CHECK TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 check Error Incorrect file format 't1'
+-test.t1 check error Corrupt
++test.t1 check status OK
+ SELECT a,b FROM t1;
+-ERROR HY000: Incorrect file format 't1'
++a b
++1 a
++2 b
++3 c
++7 g
++8 h
++10 j
++12 l
++14 n
+ # Statement ended with one of expected results (0,130,ER_FAILED_READ_FROM_PAR_FILE,ER_OPEN_AS_READONLY).
+ # If you got a difference in error message, just add it to rdiff file
+ REPAIR TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 repair Error Incorrect file format 't1'
+-test.t1 repair error Corrupt
++test.t1 repair note The storage engine for the table doesn't support repair
+ DROP TABLE t1, t2;
+ call mtr.add_suppression("Got an error from thread_id=.*");
+ call mtr.add_suppression("MySQL thread id .*, query id .* localhost.*root Checking table");
+@@ -62,45 +63,32 @@
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ REPAIR TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
++test.t1 repair note The storage engine for the table doesn't support repair
+ INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h');
+ REPAIR TABLE t1 EXTENDED;
+ Table Op Msg_type Msg_text
+-test.t1 repair status OK
++test.t1 repair note The storage engine for the table doesn't support repair
+ INSERT INTO t1 (a,b) VALUES (10,'j');
+ REPAIR TABLE t1 USE_FRM;
+ Table Op Msg_type Msg_text
+-test.t1 repair warning Number of rows changed from 0 to 3
+-test.t1 repair status OK
+-t1.MYD
+-t1.MYI
++test.t1 repair note The storage engine for the table doesn't support repair
+ t1.frm
++t1.ibd
+ INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
+ # Statement ended with one of expected results (0,144).
+ # If you got a difference in error message, just add it to rdiff file
+ FLUSH TABLE t1;
+-Restoring <DATADIR>/test/t1.MYD
++Restoring <DATADIR>/test/t1.ibd
+ CHECK TABLE t1;
+ Table Op Msg_type Msg_text
+-test.t1 check error Size of datafile is: 39 Should be: 65
+-test.t1 check error Corrupt
++test.t1 check status OK
+ SELECT a,b FROM t1;
+-ERROR HY000: Incorrect key file for table 't1'; try to repair it
+-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
+-# If you got a difference in error message, just add it to rdiff file
+-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o');
+-ERROR HY000: Table './test/t1' is marked as crashed and last (automatic?) repair failed
+-# Statement ended with one of expected results (0,144).
+-# If you got a difference in error message, just add it to rdiff file
+-FLUSH TABLE t1;
+-Restoring <DATADIR>/test/t1.MYI
+-CHECK TABLE t1;
+-Table Op Msg_type Msg_text
+-test.t1 check warning Table is marked as crashed and last repair failed
+-test.t1 check error Size of datafile is: 39 Should be: 65
+-test.t1 check error Corrupt
+-SELECT a,b FROM t1;
+-ERROR HY000: Table './test/t1' is marked as crashed and last (automatic?) repair failed
++a b
++7 g
++8 h
++10 j
++14 n
++15 o
+ # Statement ended with one of expected results (0,ER_NOT_KEYFILE,144).
+ # If you got a difference in error message, just add it to rdiff file
+ DROP TABLE t1;
diff --git a/storage/xtradb/mysql-test/storage_engine/suite.opt b/storage/xtradb/mysql-test/storage_engine/suite.opt
new file mode 100644
index 00000000000..8c10cefc626
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/suite.opt
@@ -0,0 +1 @@
+--innodb
diff --git a/storage/xtradb/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff b/storage/xtradb/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff
new file mode 100644
index 00000000000..e09e50b17ec
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff
@@ -0,0 +1,23 @@
+--- suite/storage_engine/tbl_opt_data_index_dir.result 2013-10-03 20:35:06.000000000 +0400
++++ suite/storage_engine/tbl_opt_data_index_dir.reject 2013-11-08 22:06:54.000000000 +0400
+@@ -1,10 +1,12 @@
+ DROP TABLE IF EXISTS t1;
++Warnings:
++Warning 1618 <INDEX DIRECTORY> option ignored
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' INDEX DIRECTORY='<INDEX_DIR>'
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>'
+ Warnings:
+ Warning 1618 <INDEX DIRECTORY> option ignored
+ SHOW CREATE TABLE t1;
+@@ -12,5 +14,5 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' INDEX DIRECTORY='<INDEX_DIR>'
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>'
+ DROP TABLE t1;
diff --git a/storage/xtradb/mysql-test/storage_engine/tbl_opt_insert_method.rdiff b/storage/xtradb/mysql-test/storage_engine/tbl_opt_insert_method.rdiff
new file mode 100644
index 00000000000..468b82926f0
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/tbl_opt_insert_method.rdiff
@@ -0,0 +1,11 @@
+--- suite/storage_engine/tbl_opt_insert_method.result 2012-06-24 23:55:19.539380000 +0400
++++ suite/storage_engine/tbl_opt_insert_method.reject 2012-07-15 17:51:09.978610512 +0400
+@@ -5,7 +5,7 @@
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(8) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=FIRST
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
+ ALTER TABLE t1 INSERT_METHOD=NO;
+ SHOW CREATE TABLE t1;
+ Table Create Table
diff --git a/storage/xtradb/mysql-test/storage_engine/tbl_opt_key_block_size.opt b/storage/xtradb/mysql-test/storage_engine/tbl_opt_key_block_size.opt
new file mode 100644
index 00000000000..7cd737b2b87
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/tbl_opt_key_block_size.opt
@@ -0,0 +1,3 @@
+--innodb-file-per-table=1
+--innodb-file-format=Barracuda
+
diff --git a/storage/xtradb/mysql-test/storage_engine/tbl_opt_row_format.opt b/storage/xtradb/mysql-test/storage_engine/tbl_opt_row_format.opt
new file mode 100644
index 00000000000..7cd737b2b87
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/tbl_opt_row_format.opt
@@ -0,0 +1,3 @@
+--innodb-file-per-table=1
+--innodb-file-format=Barracuda
+
diff --git a/storage/xtradb/mysql-test/storage_engine/tbl_opt_row_format.rdiff b/storage/xtradb/mysql-test/storage_engine/tbl_opt_row_format.rdiff
new file mode 100644
index 00000000000..a6572ffa7f0
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/tbl_opt_row_format.rdiff
@@ -0,0 +1,10 @@
+--- suite/storage_engine/tbl_opt_row_format.result 2012-06-24 23:55:19.539380000 +0400
++++ suite/storage_engine/tbl_opt_row_format.reject 2012-07-15 19:26:02.235049157 +0400
+@@ -1,5 +1,7 @@
+ DROP TABLE IF EXISTS t1;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> ROW_FORMAT=FIXED;
++Warnings:
++Warning 1478 <STORAGE_ENGINE>: assuming ROW_FORMAT=COMPACT.
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
diff --git a/storage/xtradb/mysql-test/storage_engine/tbl_opt_union.rdiff b/storage/xtradb/mysql-test/storage_engine/tbl_opt_union.rdiff
new file mode 100644
index 00000000000..cbdf5818022
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/tbl_opt_union.rdiff
@@ -0,0 +1,16 @@
+--- suite/storage_engine/tbl_opt_union.result 2012-06-24 23:55:19.539380000 +0400
++++ suite/storage_engine/tbl_opt_union.reject 2012-07-15 17:51:31.014346053 +0400
+@@ -4,11 +4,11 @@
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 UNION=(`child1`)
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
+ ALTER TABLE t1 UNION = (child1,child2);
+ SHOW CREATE TABLE t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 UNION=(`child1`,`child2`)
++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1
+ DROP TABLE t1, child1, child2;
diff --git a/storage/xtradb/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff b/storage/xtradb/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff
new file mode 100644
index 00000000000..e6149be58dc
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff
@@ -0,0 +1,18 @@
+--- suite/storage_engine/trx/cons_snapshot_serializable.result 2013-11-27 18:46:36.000000000 +0400
++++ suite/storage_engine/trx/cons_snapshot_serializable.reject 2013-11-28 19:17:02.000000000 +0400
+@@ -5,12 +5,15 @@
+ CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+ START TRANSACTION WITH CONSISTENT SNAPSHOT;
++Warnings:
++Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level.
+ connection con2;
+ INSERT INTO t1 (a) VALUES (1);
+ connection con1;
+ # If consistent read works on this isolation level (SERIALIZABLE), the following SELECT should not return the value we inserted (1)
+ SELECT a FROM t1;
+ a
++1
+ COMMIT;
+ connection default;
+ disconnect con1;
diff --git a/storage/xtradb/mysql-test/storage_engine/trx/level_read_committed.rdiff b/storage/xtradb/mysql-test/storage_engine/trx/level_read_committed.rdiff
new file mode 100644
index 00000000000..cb64d32138b
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/trx/level_read_committed.rdiff
@@ -0,0 +1,11 @@
+--- suite/storage_engine/trx/level_read_committed.result 2013-11-28 19:18:48.000000000 +0400
++++ suite/storage_engine/trx/level_read_committed.reject 2013-11-28 19:18:59.000000000 +0400
+@@ -77,6 +77,8 @@
+ CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+ START TRANSACTION WITH CONSISTENT SNAPSHOT;
++Warnings:
++Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level.
+ connection con2;
+ INSERT INTO t1 (a) VALUES (1);
+ connection con1;
diff --git a/storage/xtradb/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff b/storage/xtradb/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff
new file mode 100644
index 00000000000..6a79abe3ca5
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff
@@ -0,0 +1,11 @@
+--- suite/storage_engine/trx/level_read_uncommitted.result 2013-11-28 19:18:48.000000000 +0400
++++ suite/storage_engine/trx/level_read_uncommitted.reject 2013-11-28 19:19:50.000000000 +0400
+@@ -102,6 +102,8 @@
+ CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+ START TRANSACTION WITH CONSISTENT SNAPSHOT;
++Warnings:
++Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level.
+ connection con2;
+ INSERT INTO t1 (a) VALUES (1);
+ connection con1;
diff --git a/storage/xtradb/mysql-test/storage_engine/trx/suite.opt b/storage/xtradb/mysql-test/storage_engine/trx/suite.opt
new file mode 100644
index 00000000000..64bbe8b554c
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/trx/suite.opt
@@ -0,0 +1,3 @@
+--innodb
+--innodb-lock-wait-timeout=1
+
diff --git a/storage/xtradb/mysql-test/storage_engine/type_blob.opt b/storage/xtradb/mysql-test/storage_engine/type_blob.opt
new file mode 100644
index 00000000000..40445305fc6
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/type_blob.opt
@@ -0,0 +1 @@
+--innodb_log_file_size=100M
diff --git a/storage/xtradb/mysql-test/storage_engine/type_char_indexes.rdiff b/storage/xtradb/mysql-test/storage_engine/type_char_indexes.rdiff
new file mode 100644
index 00000000000..7a388552c57
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/type_char_indexes.rdiff
@@ -0,0 +1,11 @@
+--- suite/storage_engine/type_char_indexes.result 2012-07-12 19:27:42.191013570 +0400
++++ suite/storage_engine/type_char_indexes.reject 2012-07-15 17:51:55.810034331 +0400
+@@ -135,7 +135,7 @@
+ r3a
+ EXPLAIN SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16;
+ id select_type table type possible_keys key key_len ref rows Extra
+-# # # range # v16 # # # #
++# # # ALL # NULL # # # #
+ SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16;
+ c c20 v16 v128
+ a char1 varchar1a varchar1b
diff --git a/storage/xtradb/mysql-test/storage_engine/type_float_indexes.rdiff b/storage/xtradb/mysql-test/storage_engine/type_float_indexes.rdiff
new file mode 100644
index 00000000000..6ebfd61d876
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/type_float_indexes.rdiff
@@ -0,0 +1,11 @@
+--- suite/storage_engine/type_float_indexes.result 2012-07-12 19:37:27.031661128 +0400
++++ suite/storage_engine/type_float_indexes.reject 2012-07-15 17:52:12.189828410 +0400
+@@ -60,7 +60,7 @@
+ ALTER TABLE t1 ADD UNIQUE KEY(d);
+ EXPLAIN SELECT d FROM t1 WHERE r > 0 and d > 0 ORDER BY d;
+ id select_type table type possible_keys key key_len ref rows Extra
+-# # # # # d # # # #
++# # # # # NULL # # # #
+ SELECT d FROM t1 WHERE r > 0 and d > 0 ORDER BY d;
+ d
+ 1.2345
diff --git a/storage/xtradb/mysql-test/storage_engine/type_spatial_indexes.rdiff b/storage/xtradb/mysql-test/storage_engine/type_spatial_indexes.rdiff
new file mode 100644
index 00000000000..d3fb59e6ce3
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/type_spatial_indexes.rdiff
@@ -0,0 +1,712 @@
+--- suite/storage_engine/type_spatial_indexes.result 2013-08-05 18:08:49.000000000 +0400
++++ suite/storage_engine/type_spatial_indexes.reject 2013-08-05 18:25:24.000000000 +0400
+@@ -702,699 +702,15 @@
+ DROP DATABASE IF EXISTS gis_ogs;
+ CREATE DATABASE gis_ogs;
+ CREATE TABLE gis_point (fid <INT_COLUMN>, g POINT NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_line (fid <INT_COLUMN>, g LINESTRING NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_polygon (fid <INT_COLUMN>, g POLYGON NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_multi_point (fid <INT_COLUMN>, g MULTIPOINT NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_multi_line (fid <INT_COLUMN>, g MULTILINESTRING NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_multi_polygon (fid <INT_COLUMN>, g MULTIPOLYGON NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_geometrycollection (fid <INT_COLUMN>, g GEOMETRYCOLLECTION NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE gis_geometry (fid <INT_COLUMN>, g GEOMETRY NOT NULL) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-USE gis_ogs;
+-CREATE TABLE lakes (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-shore POLYGON NOT NULL, SPATIAL INDEX s(shore)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE road_segments (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-aliases CHAR(64) <CUSTOM_COL_OPTIONS>,
+-num_lanes INT <CUSTOM_COL_OPTIONS>,
+-centerline LINESTRING NOT NULL, SPATIAL INDEX c(centerline)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE divided_routes (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-num_lanes INT <CUSTOM_COL_OPTIONS>,
+-centerlines MULTILINESTRING NOT NULL, SPATIAL INDEX c(centerlines)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE forests (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-boundary MULTIPOLYGON NOT NULL, SPATIAL INDEX b(boundary)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE bridges (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-position POINT NOT NULL, SPATIAL INDEX p(position)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE streams (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-centerline LINESTRING NOT NULL, SPATIAL INDEX c(centerline)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE buildings (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-position POINT NOT NULL,
+-footprint POLYGON NOT NULL, SPATIAL INDEX p(position), SPATIAL INDEX f(footprint)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE ponds (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-type CHAR(64) <CUSTOM_COL_OPTIONS>,
+-shores MULTIPOLYGON NOT NULL, SPATIAL INDEX s(shores)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE named_places (fid INT <CUSTOM_COL_OPTIONS>,
+-name CHAR(64) <CUSTOM_COL_OPTIONS>,
+-boundary POLYGON NOT NULL, SPATIAL INDEX b(boundary)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-CREATE TABLE map_neatlines (fid INT <CUSTOM_COL_OPTIONS>,
+-neatline POLYGON NOT NULL, SPATIAL INDEX n(neatline)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+-USE test;
+-SHOW FIELDS FROM gis_point;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g point NO MUL NULL
+-SHOW FIELDS FROM gis_line;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g linestring NO MUL NULL
+-SHOW FIELDS FROM gis_polygon;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g polygon NO MUL NULL
+-SHOW FIELDS FROM gis_multi_point;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g multipoint NO MUL NULL
+-SHOW FIELDS FROM gis_multi_line;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g multilinestring NO MUL NULL
+-SHOW FIELDS FROM gis_multi_polygon;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g multipolygon NO MUL NULL
+-SHOW FIELDS FROM gis_geometrycollection;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g geometrycollection NO MUL NULL
+-SHOW FIELDS FROM gis_geometry;
+-Field Type Null Key Default Extra
+-fid int(11) YES NULL
+-g geometry NO NULL
+-INSERT INTO gis_point (fid,g) VALUES
+-(101, PointFromText('POINT(10 10)')),
+-(102, PointFromText('POINT(20 10)')),
+-(103, PointFromText('POINT(20 20)')),
+-(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));
+-INSERT INTO gis_line (fid,g) VALUES
+-(105, LineFromText('LINESTRING(0 0,0 10,10 0)')),
+-(106, LineStringFromText('LINESTRING(10 10,20 10,20 20,10 20,10 10)')),
+-(107, LineStringFromWKB(AsWKB(LineString(Point(10, 10), Point(40, 10)))));
+-INSERT INTO gis_polygon (fid,g) VALUES
+-(108, PolygonFromText('POLYGON((10 10,20 10,20 20,10 20,10 10))')),
+-(109, PolyFromText('POLYGON((0 0,50 0,50 50,0 50,0 0), (10 10,20 10,20 20,10 20,10 10))')),
+-(110, PolyFromWKB(AsWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(0, 0))))));
+-INSERT INTO gis_multi_point (fid,g) VALUES
+-(111, MultiPointFromText('MULTIPOINT(0 0,10 10,10 20,20 20)')),
+-(112, MPointFromText('MULTIPOINT(1 1,11 11,11 21,21 21)')),
+-(113, MPointFromWKB(AsWKB(MultiPoint(Point(3, 6), Point(4, 10)))));
+-INSERT INTO gis_multi_line (fid,g) VALUES
+-(114, MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))')),
+-(115, MLineFromText('MULTILINESTRING((10 48,10 21,10 0))')),
+-(116, MLineFromWKB(AsWKB(MultiLineString(LineString(Point(1, 2), Point(3, 5)), LineString(Point(2, 5), Point(5, 8), Point(21, 7))))));
+-INSERT INTO gis_multi_polygon (fid,g) VALUES
+-(117, MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
+-(118, MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
+-(119, MPolyFromWKB(AsWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3)))))));
+-INSERT INTO gis_geometrycollection (fid,g) VALUES
+-(120, GeomCollFromText('GEOMETRYCOLLECTION(POINT(0 0), LINESTRING(0 0,10 10))')),
+-(121, GeometryFromWKB(AsWKB(GeometryCollection(Point(44, 6), LineString(Point(3, 6), Point(7, 9)))))),
+-(122, GeomFromText('GeometryCollection()')),
+-(123, GeomFromText('GeometryCollection EMPTY'));
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_point;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_line;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_polygon;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_point;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_line;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_polygon;
+-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_geometrycollection;
+-SELECT fid, AsText(g) FROM gis_point;
+-fid AsText(g)
+-101 POINT(10 10)
+-102 POINT(20 10)
+-103 POINT(20 20)
+-104 POINT(10 20)
+-SELECT fid, AsText(g) FROM gis_line;
+-fid AsText(g)
+-105 LINESTRING(0 0,0 10,10 0)
+-106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
+-107 LINESTRING(10 10,40 10)
+-SELECT fid, AsText(g) FROM gis_polygon;
+-fid AsText(g)
+-108 POLYGON((10 10,20 10,20 20,10 20,10 10))
+-109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
+-110 POLYGON((0 0,30 0,30 30,0 0))
+-SELECT fid, AsText(g) FROM gis_multi_point;
+-fid AsText(g)
+-111 MULTIPOINT(0 0,10 10,10 20,20 20)
+-112 MULTIPOINT(1 1,11 11,11 21,21 21)
+-113 MULTIPOINT(3 6,4 10)
+-SELECT fid, AsText(g) FROM gis_multi_line;
+-fid AsText(g)
+-114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
+-115 MULTILINESTRING((10 48,10 21,10 0))
+-116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
+-SELECT fid, AsText(g) FROM gis_multi_polygon;
+-fid AsText(g)
+-117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
+-118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
+-119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
+-SELECT fid, AsText(g) FROM gis_geometrycollection;
+-fid AsText(g)
+-120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
+-121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
+-122 GEOMETRYCOLLECTION EMPTY
+-123 GEOMETRYCOLLECTION EMPTY
+-SELECT fid, AsText(g) FROM gis_geometry;
+-fid AsText(g)
+-101 POINT(10 10)
+-102 POINT(20 10)
+-103 POINT(20 20)
+-104 POINT(10 20)
+-105 LINESTRING(0 0,0 10,10 0)
+-106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
+-107 LINESTRING(10 10,40 10)
+-108 POLYGON((10 10,20 10,20 20,10 20,10 10))
+-109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
+-110 POLYGON((0 0,30 0,30 30,0 0))
+-111 MULTIPOINT(0 0,10 10,10 20,20 20)
+-112 MULTIPOINT(1 1,11 11,11 21,21 21)
+-113 MULTIPOINT(3 6,4 10)
+-114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
+-115 MULTILINESTRING((10 48,10 21,10 0))
+-116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
+-117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
+-118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
+-119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
+-120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
+-121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
+-122 GEOMETRYCOLLECTION EMPTY
+-123 GEOMETRYCOLLECTION EMPTY
+-SELECT fid, Dimension(g) FROM gis_geometry;
+-fid Dimension(g)
+-101 0
+-102 0
+-103 0
+-104 0
+-105 1
+-106 1
+-107 1
+-108 2
+-109 2
+-110 2
+-111 0
+-112 0
+-113 0
+-114 1
+-115 1
+-116 1
+-117 2
+-118 2
+-119 2
+-120 1
+-121 1
+-122 0
+-123 0
+-SELECT fid, GeometryType(g) FROM gis_geometry;
+-fid GeometryType(g)
+-101 POINT
+-102 POINT
+-103 POINT
+-104 POINT
+-105 LINESTRING
+-106 LINESTRING
+-107 LINESTRING
+-108 POLYGON
+-109 POLYGON
+-110 POLYGON
+-111 MULTIPOINT
+-112 MULTIPOINT
+-113 MULTIPOINT
+-114 MULTILINESTRING
+-115 MULTILINESTRING
+-116 MULTILINESTRING
+-117 MULTIPOLYGON
+-118 MULTIPOLYGON
+-119 MULTIPOLYGON
+-120 GEOMETRYCOLLECTION
+-121 GEOMETRYCOLLECTION
+-122 GEOMETRYCOLLECTION
+-123 GEOMETRYCOLLECTION
+-SELECT fid, IsEmpty(g) FROM gis_geometry;
+-fid IsEmpty(g)
+-101 0
+-102 0
+-103 0
+-104 0
+-105 0
+-106 0
+-107 0
+-108 0
+-109 0
+-110 0
+-111 0
+-112 0
+-113 0
+-114 0
+-115 0
+-116 0
+-117 0
+-118 0
+-119 0
+-120 0
+-121 0
+-122 0
+-123 0
+-SELECT fid, AsText(Envelope(g)) FROM gis_geometry;
+-fid AsText(Envelope(g))
+-101 POLYGON((10 10,10 10,10 10,10 10,10 10))
+-102 POLYGON((20 10,20 10,20 10,20 10,20 10))
+-103 POLYGON((20 20,20 20,20 20,20 20,20 20))
+-104 POLYGON((10 20,10 20,10 20,10 20,10 20))
+-105 POLYGON((0 0,10 0,10 10,0 10,0 0))
+-106 POLYGON((10 10,20 10,20 20,10 20,10 10))
+-107 POLYGON((10 10,40 10,40 10,10 10,10 10))
+-108 POLYGON((10 10,20 10,20 20,10 20,10 10))
+-109 POLYGON((0 0,50 0,50 50,0 50,0 0))
+-110 POLYGON((0 0,30 0,30 30,0 30,0 0))
+-111 POLYGON((0 0,20 0,20 20,0 20,0 0))
+-112 POLYGON((1 1,21 1,21 21,1 21,1 1))
+-113 POLYGON((3 6,4 6,4 10,3 10,3 6))
+-114 POLYGON((10 0,16 0,16 48,10 48,10 0))
+-115 POLYGON((10 0,10 0,10 48,10 48,10 0))
+-116 POLYGON((1 2,21 2,21 8,1 8,1 2))
+-117 POLYGON((28 0,84 0,84 42,28 42,28 0))
+-118 POLYGON((28 0,84 0,84 42,28 42,28 0))
+-119 POLYGON((0 0,3 0,3 3,0 3,0 0))
+-120 POLYGON((0 0,10 0,10 10,0 10,0 0))
+-121 POLYGON((3 6,44 6,44 9,3 9,3 6))
+-122 GEOMETRYCOLLECTION EMPTY
+-123 GEOMETRYCOLLECTION EMPTY
+-SELECT fid, X(g) FROM gis_point;
+-fid X(g)
+-101 10
+-102 20
+-103 20
+-104 10
+-SELECT fid, Y(g) FROM gis_point;
+-fid Y(g)
+-101 10
+-102 10
+-103 20
+-104 20
+-SELECT fid, AsText(StartPoint(g)) FROM gis_line;
+-fid AsText(StartPoint(g))
+-105 POINT(0 0)
+-106 POINT(10 10)
+-107 POINT(10 10)
+-SELECT fid, AsText(EndPoint(g)) FROM gis_line;
+-fid AsText(EndPoint(g))
+-105 POINT(10 0)
+-106 POINT(10 10)
+-107 POINT(40 10)
+-SELECT fid, GLength(g) FROM gis_line;
+-fid GLength(g)
+-105 24.14213562373095
+-106 40
+-107 30
+-SELECT fid, NumPoints(g) FROM gis_line;
+-fid NumPoints(g)
+-105 3
+-106 5
+-107 2
+-SELECT fid, AsText(PointN(g, 2)) FROM gis_line;
+-fid AsText(PointN(g, 2))
+-105 POINT(0 10)
+-106 POINT(20 10)
+-107 POINT(40 10)
+-SELECT fid, IsClosed(g) FROM gis_line;
+-fid IsClosed(g)
+-105 0
+-106 1
+-107 0
+-SELECT fid, AsText(Centroid(g)) FROM gis_polygon;
+-fid AsText(Centroid(g))
+-108 POINT(15 15)
+-109 POINT(25.416666666666668 25.416666666666668)
+-110 POINT(20 10)
+-SELECT fid, Area(g) FROM gis_polygon;
+-fid Area(g)
+-108 100
+-109 2400
+-110 450
+-SELECT fid, AsText(ExteriorRing(g)) FROM gis_polygon;
+-fid AsText(ExteriorRing(g))
+-108 LINESTRING(10 10,20 10,20 20,10 20,10 10)
+-109 LINESTRING(0 0,50 0,50 50,0 50,0 0)
+-110 LINESTRING(0 0,30 0,30 30,0 0)
+-SELECT fid, NumInteriorRings(g) FROM gis_polygon;
+-fid NumInteriorRings(g)
+-108 0
+-109 1
+-110 0
+-SELECT fid, AsText(InteriorRingN(g, 1)) FROM gis_polygon;
+-fid AsText(InteriorRingN(g, 1))
+-108 NULL
+-109 LINESTRING(10 10,20 10,20 20,10 20,10 10)
+-110 NULL
+-SELECT fid, IsClosed(g) FROM gis_multi_line;
+-fid IsClosed(g)
+-114 0
+-115 0
+-116 0
+-SELECT fid, AsText(Centroid(g)) FROM gis_multi_polygon;
+-fid AsText(Centroid(g))
+-117 POINT(55.58852775304245 17.426536064113982)
+-118 POINT(55.58852775304245 17.426536064113982)
+-119 POINT(2 2)
+-SELECT fid, Area(g) FROM gis_multi_polygon;
+-fid Area(g)
+-117 1684.5
+-118 1684.5
+-119 4.5
+-SELECT fid, NumGeometries(g) from gis_multi_point;
+-fid NumGeometries(g)
+-111 4
+-112 4
+-113 2
+-SELECT fid, NumGeometries(g) from gis_multi_line;
+-fid NumGeometries(g)
+-114 2
+-115 1
+-116 2
+-SELECT fid, NumGeometries(g) from gis_multi_polygon;
+-fid NumGeometries(g)
+-117 2
+-118 2
+-119 1
+-SELECT fid, NumGeometries(g) from gis_geometrycollection;
+-fid NumGeometries(g)
+-120 2
+-121 2
+-122 0
+-123 0
+-SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point;
+-fid AsText(GeometryN(g, 2))
+-111 POINT(10 10)
+-112 POINT(11 11)
+-113 POINT(4 10)
+-SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_line;
+-fid AsText(GeometryN(g, 2))
+-114 LINESTRING(16 0,16 23,16 48)
+-115 NULL
+-116 LINESTRING(2 5,5 8,21 7)
+-SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_polygon;
+-fid AsText(GeometryN(g, 2))
+-117 POLYGON((59 18,67 18,67 13,59 13,59 18))
+-118 POLYGON((59 18,67 18,67 13,59 13,59 18))
+-119 NULL
+-SELECT fid, AsText(GeometryN(g, 2)) from gis_geometrycollection;
+-fid AsText(GeometryN(g, 2))
+-120 LINESTRING(0 0,10 10)
+-121 LINESTRING(3 6,7 9)
+-122 NULL
+-123 NULL
+-SELECT fid, AsText(GeometryN(g, 1)) from gis_geometrycollection;
+-fid AsText(GeometryN(g, 1))
+-120 POINT(0 0)
+-121 POINT(44 6)
+-122 NULL
+-123 NULL
+-SELECT g1.fid as first, g2.fid as second,
+-Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o,
+-Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t,
+-Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r
+-FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second;
+-first second w c o e d t i r
+-120 120 1 1 0 1 0 1 1 0
+-120 121 0 0 1 0 0 0 1 0
+-120 122 NULL NULL NULL NULL NULL NULL NULL NULL
+-120 123 NULL NULL NULL NULL NULL NULL NULL NULL
+-121 120 0 0 1 0 0 0 1 0
+-121 121 1 1 0 1 0 1 1 0
+-121 122 NULL NULL NULL NULL NULL NULL NULL NULL
+-121 123 NULL NULL NULL NULL NULL NULL NULL NULL
+-122 120 NULL NULL NULL NULL NULL NULL NULL NULL
+-122 121 NULL NULL NULL NULL NULL NULL NULL NULL
+-122 122 NULL NULL NULL NULL NULL NULL NULL NULL
+-122 123 NULL NULL NULL NULL NULL NULL NULL NULL
+-123 120 NULL NULL NULL NULL NULL NULL NULL NULL
+-123 121 NULL NULL NULL NULL NULL NULL NULL NULL
+-123 122 NULL NULL NULL NULL NULL NULL NULL NULL
+-123 123 NULL NULL NULL NULL NULL NULL NULL NULL
+-DROP TABLE gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry;
+-USE gis_ogs;
+-# Lakes
+-INSERT INTO lakes (fid,name,shore) VALUES (
+-101, 'BLUE LAKE',
+-PolyFromText(
+-'POLYGON(
+- (52 18,66 23,73 9,48 6,52 18),
+- (59 18,67 18,67 13,59 13,59 18)
+- )',
+-101));
+-# Road Segments
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(102, 'Route 5', NULL, 2,
+-LineFromText(
+-'LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101));
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(103, 'Route 5', 'Main Street', 4,
+-LineFromText(
+-'LINESTRING( 44 31, 56 34, 70 38 )' ,101));
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(104, 'Route 5', NULL, 2,
+-LineFromText(
+-'LINESTRING( 70 38, 72 48 )' ,101));
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(105, 'Main Street', NULL, 4,
+-LineFromText(
+-'LINESTRING( 70 38, 84 42 )' ,101));
+-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(106, 'Dirt Road by Green Forest', NULL,
+-1,
+-LineFromText(
+-'LINESTRING( 28 26, 28 0 )',101));
+-# DividedRoutes
+-INSERT INTO divided_routes (fid,name,num_lanes,centerlines) VALUES(119, 'Route 75', 4,
+-MLineFromText(
+-'MULTILINESTRING((10 48,10 21,10 0),
+- (16 0,16 23,16 48))', 101));
+-# Forests
+-INSERT INTO forests (fid,name,boundary) VALUES(109, 'Green Forest',
+-MPolyFromText(
+-'MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),
+- (52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))',
+-101));
+-# Bridges
+-INSERT INTO bridges (fid,name,position) VALUES(110, 'Cam Bridge', PointFromText(
+-'POINT( 44 31 )', 101));
+-# Streams
+-INSERT INTO streams (fid,name,centerline) VALUES(111, 'Cam Stream',
+-LineFromText(
+-'LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101));
+-INSERT INTO streams (fid,name,centerline) VALUES(112, NULL,
+-LineFromText(
+-'LINESTRING( 76 0, 78 4, 73 9 )', 101));
+-# Buildings
+-INSERT INTO buildings (fid,name,position,footprint) VALUES(113, '123 Main Street',
+-PointFromText(
+-'POINT( 52 30 )', 101),
+-PolyFromText(
+-'POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101));
+-INSERT INTO buildings (fid,name,position,footprint) VALUES(114, '215 Main Street',
+-PointFromText(
+-'POINT( 64 33 )', 101),
+-PolyFromText(
+-'POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101));
+-# Ponds
+-INSERT INTO ponds (fid,name,type,shores) VALUES(120, NULL, 'Stock Pond',
+-MPolyFromText(
+-'MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ),
+- ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101));
+-# Named Places
+-INSERT INTO named_places (fid,name,boundary) VALUES(117, 'Ashton',
+-PolyFromText(
+-'POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101));
+-INSERT INTO named_places (fid,name,boundary) VALUES(118, 'Goose Island',
+-PolyFromText(
+-'POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101));
+-# Map Neatlines
+-INSERT INTO map_neatlines (fid,neatline) VALUES(115,
+-PolyFromText(
+-'POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101));
+-SELECT Dimension(shore)
+-FROM lakes
+-WHERE name = 'Blue Lake';
+-Dimension(shore)
+-2
+-SELECT GeometryType(centerlines)
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-GeometryType(centerlines)
+-MULTILINESTRING
+-SELECT AsText(boundary)
+-FROM named_places
+-WHERE name = 'Goose Island';
+-AsText(boundary)
+-POLYGON((67 13,67 18,59 18,59 13,67 13))
+-SELECT AsText(PolyFromWKB(AsBinary(boundary),101))
+-FROM named_places
+-WHERE name = 'Goose Island';
+-AsText(PolyFromWKB(AsBinary(boundary),101))
+-POLYGON((67 13,67 18,59 18,59 13,67 13))
+-SELECT SRID(boundary)
+-FROM named_places
+-WHERE name = 'Goose Island';
+-SRID(boundary)
+-101
+-SELECT IsEmpty(centerline)
+-FROM road_segments
+-WHERE name = 'Route 5'
+-AND aliases = 'Main Street';
+-IsEmpty(centerline)
+-0
+-SELECT AsText(Envelope(boundary))
+-FROM named_places
+-WHERE name = 'Goose Island';
+-AsText(Envelope(boundary))
+-POLYGON((59 13,67 13,67 18,59 18,59 13))
+-SELECT X(position)
+-FROM bridges
+-WHERE name = 'Cam Bridge';
+-X(position)
+-44
+-SELECT Y(position)
+-FROM bridges
+-WHERE name = 'Cam Bridge';
+-Y(position)
+-31
+-SELECT AsText(StartPoint(centerline))
+-FROM road_segments
+-WHERE fid = 102;
+-AsText(StartPoint(centerline))
+-POINT(0 18)
+-SELECT AsText(EndPoint(centerline))
+-FROM road_segments
+-WHERE fid = 102;
+-AsText(EndPoint(centerline))
+-POINT(44 31)
+-SELECT GLength(centerline)
+-FROM road_segments
+-WHERE fid = 106;
+-GLength(centerline)
+-26
+-SELECT NumPoints(centerline)
+-FROM road_segments
+-WHERE fid = 102;
+-NumPoints(centerline)
+-5
+-SELECT AsText(PointN(centerline, 1))
+-FROM road_segments
+-WHERE fid = 102;
+-AsText(PointN(centerline, 1))
+-POINT(0 18)
+-SELECT AsText(Centroid(boundary))
+-FROM named_places
+-WHERE name = 'Goose Island';
+-AsText(Centroid(boundary))
+-POINT(63 15.5)
+-SELECT Area(boundary)
+-FROM named_places
+-WHERE name = 'Goose Island';
+-Area(boundary)
+-40
+-SELECT AsText(ExteriorRing(shore))
+-FROM lakes
+-WHERE name = 'Blue Lake';
+-AsText(ExteriorRing(shore))
+-LINESTRING(52 18,66 23,73 9,48 6,52 18)
+-SELECT NumInteriorRings(shore)
+-FROM lakes
+-WHERE name = 'Blue Lake';
+-NumInteriorRings(shore)
+-1
+-SELECT AsText(InteriorRingN(shore, 1))
+-FROM lakes
+-WHERE name = 'Blue Lake';
+-AsText(InteriorRingN(shore, 1))
+-LINESTRING(59 18,67 18,67 13,59 13,59 18)
+-SELECT NumGeometries(centerlines)
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-NumGeometries(centerlines)
+-2
+-SELECT AsText(GeometryN(centerlines, 2))
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-AsText(GeometryN(centerlines, 2))
+-LINESTRING(16 0,16 23,16 48)
+-SELECT IsClosed(centerlines)
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-IsClosed(centerlines)
+-0
+-SELECT GLength(centerlines)
+-FROM divided_routes
+-WHERE name = 'Route 75';
+-GLength(centerlines)
+-96
+-SELECT AsText(Centroid(shores))
+-FROM ponds
+-WHERE fid = 120;
+-AsText(Centroid(shores))
+-POINT(25 42)
+-SELECT Area(shores)
+-FROM ponds
+-WHERE fid = 120;
+-Area(shores)
+-8
+-SELECT ST_Equals(boundary,
+-PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1))
+-FROM named_places
+-WHERE name = 'Goose Island';
+-ST_Equals(boundary,
+-PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1))
+-1
+-SELECT ST_Disjoint(centerlines, boundary)
+-FROM divided_routes, named_places
+-WHERE divided_routes.name = 'Route 75'
+-AND named_places.name = 'Ashton';
+-ST_Disjoint(centerlines, boundary)
+-1
+-SELECT ST_Touches(centerline, shore)
+-FROM streams, lakes
+-WHERE streams.name = 'Cam Stream'
+-AND lakes.name = 'Blue Lake';
+-ST_Touches(centerline, shore)
+-1
+-SELECT Crosses(road_segments.centerline, divided_routes.centerlines)
+-FROM road_segments, divided_routes
+-WHERE road_segments.fid = 102
+-AND divided_routes.name = 'Route 75';
+-Crosses(road_segments.centerline, divided_routes.centerlines)
+-1
+-SELECT ST_Intersects(road_segments.centerline, divided_routes.centerlines)
+-FROM road_segments, divided_routes
+-WHERE road_segments.fid = 102
+-AND divided_routes.name = 'Route 75';
+-ST_Intersects(road_segments.centerline, divided_routes.centerlines)
+-1
+-SELECT ST_Contains(forests.boundary, named_places.boundary)
+-FROM forests, named_places
+-WHERE forests.name = 'Green Forest'
+-AND named_places.name = 'Ashton';
+-ST_Contains(forests.boundary, named_places.boundary)
+-0
+-SELECT ST_Distance(position, boundary)
+-FROM bridges, named_places
+-WHERE bridges.name = 'Cam Bridge'
+-AND named_places.name = 'Ashton';
+-ST_Distance(position, boundary)
+-12
+-SELECT AsText(ST_Difference(named_places.boundary, forests.boundary))
+-FROM named_places, forests
+-WHERE named_places.name = 'Ashton'
+-AND forests.name = 'Green Forest';
+-AsText(ST_Difference(named_places.boundary, forests.boundary))
+-POLYGON((56 34,62 48,84 48,84 42,56 34))
+-SELECT AsText(ST_Union(shore, boundary))
+-FROM lakes, named_places
+-WHERE lakes.name = 'Blue Lake'
+-AND named_places.name = 'Goose Island';
+-AsText(ST_Union(shore, boundary))
+-POLYGON((48 6,52 18,66 23,73 9,48 6))
+-SELECT AsText(ST_SymDifference(shore, boundary))
+-FROM lakes, named_places
+-WHERE lakes.name = 'Blue Lake'
+-AND named_places.name = 'Ashton';
+-AsText(ST_SymDifference(shore, boundary))
+-MULTIPOLYGON(((48 6,52 18,66 23,73 9,48 6),(59 13,59 18,67 18,67 13,59 13)),((56 30,56 34,62 48,84 48,84 30,56 30)))
+-SELECT count(*)
+-FROM buildings, bridges
+-WHERE ST_Contains(ST_Buffer(bridges.position, 15.0), buildings.footprint) = 1;
+-count(*)
+-1
++ERROR HY000: The storage engine <STORAGE_ENGINE> doesn't support SPATIAL indexes
++# ERROR: Statement ended with errno 1464, errname ER_TABLE_CANT_HANDLE_SPKEYS (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# [ CREATE TABLE gis_point (fid INT(11) /*!*/ /*Custom column options*/, g POINT NOT NULL, SPATIAL INDEX(g)) ENGINE=InnoDB /*!*/ /*Custom table options*/ ]
++# The statement|command finished with ER_TABLE_CANT_HANDLE_SPKEYS.
++# Geometry types or spatial indexes or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ DROP DATABASE gis_ogs;
+ USE test;
diff --git a/storage/xtradb/mysql-test/storage_engine/type_text.opt b/storage/xtradb/mysql-test/storage_engine/type_text.opt
new file mode 100644
index 00000000000..40445305fc6
--- /dev/null
+++ b/storage/xtradb/mysql-test/storage_engine/type_text.opt
@@ -0,0 +1 @@
+--innodb_log_file_size=100M
diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc
index a7a5d5f32c0..92e66ed7901 100644
--- a/storage/xtradb/os/os0file.cc
+++ b/storage/xtradb/os/os0file.cc
@@ -2195,7 +2195,7 @@ os_file_delete_if_exists_func(
bool ret;
ulint count = 0;
loop:
- /* In Windows, deleting an .ibd file may fail if ibbackup is copying
+ /* In Windows, deleting an .ibd file may fail if mysqlbackup is copying
it */
ret = DeleteFile((LPCTSTR) name);
@@ -2220,7 +2220,7 @@ loop:
ib_logf(IB_LOG_LEVEL_WARN, "Delete of file %s failed.", name);
}
- os_thread_sleep(1000000); /* sleep for a second */
+ os_thread_sleep(500000); /* sleep for 0.5 second */
if (count > 2000) {
@@ -2258,7 +2258,7 @@ os_file_delete_func(
BOOL ret;
ulint count = 0;
loop:
- /* In Windows, deleting an .ibd file may fail if ibbackup is copying
+ /* In Windows, deleting an .ibd file may fail if mysqlbackup is copying
it */
ret = DeleteFile((LPCTSTR) name);
@@ -2281,7 +2281,7 @@ loop:
fprintf(stderr,
"InnoDB: Warning: cannot delete file %s\n"
- "InnoDB: Are you running ibbackup"
+ "InnoDB: Are you running mysqlbackup"
" to back up the file?\n", name);
}
@@ -3135,7 +3135,7 @@ try_again:
}
ib_logf(IB_LOG_LEVEL_ERROR,
- "Tried to read "ULINTPF" bytes at offset " UINT64PF". "
+ "Tried to read " ULINTPF " bytes at offset " UINT64PF ". "
"Was only able to read %ld.", n, offset, (lint) ret);
#endif /* __WIN__ */
retry = os_file_handle_error(NULL, "read", __FILE__, __LINE__);
@@ -3308,7 +3308,8 @@ os_file_write_func(
DWORD len;
ulint n_retries = 0;
ulint err;
- OVERLAPPED overlapped;
+ OVERLAPPED overlapped;
+ DWORD saved_error = 0;
/* On 64-bit Windows, ulint is 64 bits. But offset and n should be
no more than 32 bits. */
@@ -3336,7 +3337,7 @@ retry:
if (ret) {
ret = GetOverlappedResult(file, &overlapped, (DWORD *)&len, FALSE);
}
- else if(GetLastError() == ERROR_IO_PENDING) {
+ else if ( GetLastError() == ERROR_IO_PENDING) {
ret = GetOverlappedResult(file, &overlapped, (DWORD *)&len, TRUE);
}
@@ -3364,8 +3365,10 @@ retry:
}
if (!os_has_said_disk_full) {
+ char *winmsg = NULL;
- err = (ulint) GetLastError();
+ saved_error = GetLastError();
+ err = (ulint) saved_error;
ut_print_timestamp(stderr);
@@ -3382,6 +3385,23 @@ retry:
name, offset,
(ulong) n, (ulong) len, (ulong) err);
+ /* Ask Windows to prepare a standard message for a
+ GetLastError() */
+
+ FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL, saved_error,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPSTR)&winmsg, 0, NULL);
+
+ if (winmsg) {
+ fprintf(stderr,
+ "InnoDB: FormatMessage: Error number %lu means '%s'.\n",
+ (ulong) saved_error, winmsg);
+ LocalFree(winmsg);
+ }
+
if (strerror((int) err) != NULL) {
fprintf(stderr,
"InnoDB: Error number %lu means '%s'.\n",
@@ -3415,7 +3435,7 @@ retry:
fprintf(stderr,
" InnoDB: Error: Write to file %s failed"
- " at offset "UINT64PF".\n"
+ " at offset " UINT64PF ".\n"
"InnoDB: %lu bytes should have been written,"
" only %ld were written.\n"
"InnoDB: Operating system error number %lu.\n"
@@ -5065,8 +5085,10 @@ os_aio_func(
wake_later = mode & OS_AIO_SIMULATED_WAKE_LATER;
mode = mode & (~OS_AIO_SIMULATED_WAKE_LATER);
- if (mode == OS_AIO_SYNC)
- {
+ DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
+ mode = OS_AIO_SYNC;);
+
+ if (mode == OS_AIO_SYNC) {
ibool ret;
/* This is actually an ordinary synchronous read or write:
no need to use an i/o-handler thread */
@@ -5081,7 +5103,18 @@ os_aio_func(
ret = os_file_write(name, file, buf, offset, n);
}
- ut_a(ret);
+
+ DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
+ os_has_said_disk_full = FALSE;);
+ DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
+ ret = 0;);
+ DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
+ errno = 28;);
+
+ if (!ret) {
+ fprintf(stderr, "FAIL");
+ }
+
return ret;
}
@@ -5978,7 +6011,13 @@ consecutive_loop:
aio_slot->page_compression);
}
- ut_a(ret);
+ DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2",
+ os_has_said_disk_full = FALSE;);
+ DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2",
+ ret = 0;);
+ DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2",
+ errno = 28;);
+
srv_set_io_thread_op_info(global_segment, "file i/o done");
if (aio_slot->type == OS_FILE_READ && n_consecutive > 1) {
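
The os0file.cc hunks above thread DBUG_EXECUTE_IF() fault-injection points ("ib_os_aio_func_io_failure_28" and "ib_os_aio_func_io_failure_28_2") into the synchronous and simulated-AIO write paths, so a test can force the disk-full (errno 28, ENOSPC) handling to run instead of tripping the ut_a(ret) assertion. The stand-alone C++ sketch below only illustrates that general pattern; SIM_EXECUTE_IF, active_failpoints and pretend_write are made-up stand-ins for the real DBUG_EXECUTE_IF(), DBUG_SET() and os_file_write(), which live in the dbug library and InnoDB respectively.

#include <cerrno>
#include <cstdio>
#include <set>
#include <string>

// Hypothetical registry of active fault-injection keywords (the dbug
// library keeps this state per thread; a global set is enough here).
static std::set<std::string> active_failpoints;

// Stand-in for DBUG_EXECUTE_IF(): run `code` only when `name` is active.
#define SIM_EXECUTE_IF(name, code) \
        do { if (active_failpoints.count(name)) { code } } while (0)

// Stand-in for the write path: the "real" write is assumed to succeed,
// then the fault point may flip the result to a disk-full failure.
static bool pretend_write(const char* path) {
        bool ret = true;
        SIM_EXECUTE_IF("io_failure_28", ret = false; errno = ENOSPC;);
        if (!ret) {
                std::fprintf(stderr, "write to %s failed: errno=%d\n",
                             path, errno);
        }
        return ret;
}

int main() {
        // Equivalent of DBUG_SET("+d,io_failure_28") in the real library.
        active_failpoints.insert("io_failure_28");
        return pretend_write("./ibdata1") ? 0 : 1;
}

In a debug server build the same keywords are typically switched on from a test via the debug/debug_dbug system variable, e.g. SET debug_dbug='+d,ib_os_aio_func_io_failure_28'.
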
diff --git a/storage/xtradb/os/os0stacktrace.cc b/storage/xtradb/os/os0stacktrace.cc
index 4d52e625057..c4c428e0db3 100644
--- a/storage/xtradb/os/os0stacktrace.cc
+++ b/storage/xtradb/os/os0stacktrace.cc
@@ -85,16 +85,16 @@ os_stacktrace_print(
caller_address = (void*) uc->uc_mcontext.gregs[REG_RIP] ;
#elif defined(__hppa__)
ucontext_t* uc = (ucontext_t*) ucontext;
- caller_address = (void*) uc->uc_mcontext.sc_iaoq[0] & ~0x3UL ;
+ caller_address = (void*) (uc->uc_mcontext.sc_iaoq[0] & ~0x3UL) ;
#elif (defined (__ppc__)) || (defined (__powerpc__))
ucontext_t* uc = (ucontext_t*) ucontext;
caller_address = (void*) uc->uc_mcontext.regs->nip ;
#elif defined(__sparc__)
struct sigcontext* sc = (struct sigcontext*) ucontext;
#if __WORDSIZE == 64
- caller_address = (void*) scp->sigc_regs.tpc ;
+ caller_address = (void*) sc->sigc_regs.tpc ;
#else
- pnt = (void*) scp->si_regs.pc ;
+ caller_address = (void*) sc->si_regs.pc ;
#endif
#elif defined(__i386__)
ucontext_t* uc = (ucontext_t*) ucontext;
diff --git a/storage/xtradb/page/page0zip.cc b/storage/xtradb/page/page0zip.cc
index 245eb6198a0..a12d30a8063 100644
--- a/storage/xtradb/page/page0zip.cc
+++ b/storage/xtradb/page/page0zip.cc
@@ -2,6 +2,7 @@
Copyright (c) 2005, 2014, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
+Copyright (c) 2014, SkySQL Ab. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1313,6 +1314,30 @@ page_zip_compress(
MONITOR_INC(MONITOR_PAGE_COMPRESS);
+ /* Simulate a compression failure with a probability determined by
+ innodb_simulate_comp_failures, only if the page has 2 or more
+ records. */
+
+ if (srv_simulate_comp_failures
+ && !dict_index_is_ibuf(index)
+ && page_get_n_recs(page) >= 2
+ && ((ulint)(rand() % 100) < srv_simulate_comp_failures)
+ && strcasecmp(index->table_name, "IBUF_DUMMY") != 0) {
+
+#ifdef UNIV_DEBUG
+ fprintf(stderr,
+ "InnoDB: Simulating a compression failure"
+ " for table %s, index %s, page %lu (%s)\n",
+ index->table_name,
+ index->name,
+ page_get_page_no(page),
+ page_is_leaf(page) ? "leaf" : "non-leaf");
+
+#endif
+
+ goto err_exit;
+ }
+
heap = mem_heap_create(page_zip_get_size(page_zip)
+ n_fields * (2 + sizeof(ulint))
+ REC_OFFS_HEADER_SIZE
@@ -3258,24 +3283,8 @@ page_zip_validate_low(
temp_page_buf = static_cast<byte*>(ut_malloc(2 * UNIV_PAGE_SIZE));
temp_page = static_cast<byte*>(ut_align(temp_page_buf, UNIV_PAGE_SIZE));
-#ifdef UNIV_DEBUG_VALGRIND
- /* Get detailed information on the valid bits in case the
- UNIV_MEM_ASSERT_RW() checks fail. The v-bits of page[],
- page_zip->data[] or page_zip could be viewed at temp_page[] or
- temp_page_zip in a debugger when running valgrind --db-attach. */
- (void) VALGRIND_GET_VBITS(page, temp_page, UNIV_PAGE_SIZE);
UNIV_MEM_ASSERT_RW(page, UNIV_PAGE_SIZE);
-# if UNIV_WORD_SIZE == 4
- VALGRIND_GET_VBITS(page_zip, &temp_page_zip, sizeof temp_page_zip);
- /* On 32-bit systems, there is no padding in page_zip_des_t.
- On other systems, Valgrind could complain about uninitialized
- pad bytes. */
- UNIV_MEM_ASSERT_RW(page_zip, sizeof *page_zip);
-# endif
- (void) VALGRIND_GET_VBITS(page_zip->data, temp_page,
- page_zip_get_size(page_zip));
UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
-#endif /* UNIV_DEBUG_VALGRIND */
temp_page_zip = *page_zip;
valid = page_zip_decompress(&temp_page_zip, temp_page, TRUE);
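[Reviewer note] The new block in page_zip_compress() makes a configurable percentage of compression attempts fail, so the error-handling path for failed page compression gets exercised. A stripped-down sketch of that probabilistic fault injection follows; simulate_failure_pct and compress_page() are illustrative stand-ins for innodb_simulate_comp_failures and page_zip_compress().

// Sketch of probabilistic failure injection around a compression routine.
#include <cstdio>
#include <cstdlib>

static unsigned simulate_failure_pct = 25;      /* fail about 25% of calls */

static bool compress_page(int page_no, int n_recs)
{
        /* Only pages with at least two records are eligible, mirroring the
        page_get_n_recs(page) >= 2 condition in the patch. */
        if (simulate_failure_pct != 0
            && n_recs >= 2
            && (unsigned)(rand() % 100) < simulate_failure_pct) {
                fprintf(stderr,
                        "simulating a compression failure for page %d\n",
                        page_no);
                return false;   /* caller takes the compression-failed path */
        }

        /* ... real compression would happen here ... */
        return true;
}

int main()
{
        srand(42);

        int failures = 0;
        for (int page = 0; page < 1000; page++) {
                if (!compress_page(page, 10)) {
                        failures++;
                }
        }
        printf("%d of 1000 simulated compressions failed\n", failures);
        return 0;
}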
diff --git a/storage/xtradb/read/read0read.cc b/storage/xtradb/read/read0read.cc
index 887e1717769..c350e24dbb0 100644
--- a/storage/xtradb/read/read0read.cc
+++ b/storage/xtradb/read/read0read.cc
@@ -221,7 +221,7 @@ views contiguously, one identical in size and content as @param view (starting
at returned pointer) and another view immediately following the trx_ids array.
The second view will have space for an extra trx_id_t element.
@return read view struct */
-UNIV_INLINE
+UNIV_INTERN
read_view_t*
read_view_clone(
/*============*/
@@ -256,7 +256,7 @@ read_view_clone(
/*********************************************************************//**
Insert the view in the proper order into the trx_sys->view_list. The
read view list is ordered by read_view_t::low_limit_no in descending order. */
-static
+UNIV_INTERN
void
read_view_add(
/*==========*/
diff --git a/storage/xtradb/row/row0ins.cc b/storage/xtradb/row/row0ins.cc
index 444fac87842..c0396a96cfc 100644
--- a/storage/xtradb/row/row0ins.cc
+++ b/storage/xtradb/row/row0ins.cc
@@ -151,35 +151,37 @@ row_ins_alloc_sys_fields(
ut_ad(row && table && heap);
ut_ad(dtuple_get_n_fields(row) == dict_table_get_n_cols(table));
- /* 1. Allocate buffer for row id */
+ /* allocate buffer to hold the needed system created hidden columns. */
+ uint len = DATA_ROW_ID_LEN + DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN;
+ ptr = static_cast<byte*>(mem_heap_zalloc(heap, len));
+ /* 1. Populate row-id */
col = dict_table_get_sys_col(table, DATA_ROW_ID);
dfield = dtuple_get_nth_field(row, dict_col_get_no(col));
- ptr = static_cast<byte*>(mem_heap_zalloc(heap, DATA_ROW_ID_LEN));
-
dfield_set_data(dfield, ptr, DATA_ROW_ID_LEN);
node->row_id_buf = ptr;
- /* 3. Allocate buffer for trx id */
+ ptr += DATA_ROW_ID_LEN;
+ /* 2. Populate trx id */
col = dict_table_get_sys_col(table, DATA_TRX_ID);
dfield = dtuple_get_nth_field(row, dict_col_get_no(col));
- ptr = static_cast<byte*>(mem_heap_zalloc(heap, DATA_TRX_ID_LEN));
dfield_set_data(dfield, ptr, DATA_TRX_ID_LEN);
node->trx_id_buf = ptr;
- /* 4. Allocate buffer for roll ptr */
+ ptr += DATA_TRX_ID_LEN;
+
+ /* 3. Populate roll ptr */
col = dict_table_get_sys_col(table, DATA_ROLL_PTR);
dfield = dtuple_get_nth_field(row, dict_col_get_no(col));
- ptr = static_cast<byte*>(mem_heap_zalloc(heap, DATA_ROLL_PTR_LEN));
dfield_set_data(dfield, ptr, DATA_ROLL_PTR_LEN);
}
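[Reviewer note] The rewrite above replaces three separate mem_heap_zalloc() calls with one allocation that is carved into the row id, trx id, and roll ptr slots by advancing a byte pointer. A standalone sketch of that carving pattern follows; the lengths are the InnoDB ones (6, 6, 7 bytes) and calloc stands in for the memory heap.

// Sketch of carving one zero-filled allocation into the three system
// column buffers, mirroring row_ins_alloc_sys_fields() after the patch.
#include <cstdio>
#include <cstdlib>

static const unsigned DATA_ROW_ID_LEN   = 6;
static const unsigned DATA_TRX_ID_LEN   = 6;
static const unsigned DATA_ROLL_PTR_LEN = 7;

struct sys_fields_t {
        unsigned char* row_id;
        unsigned char* trx_id;
        unsigned char* roll_ptr;
};

static sys_fields_t alloc_sys_fields()
{
        const unsigned len = DATA_ROW_ID_LEN + DATA_TRX_ID_LEN
                + DATA_ROLL_PTR_LEN;

        /* One zero-filled block instead of three separate allocations. */
        unsigned char* ptr = static_cast<unsigned char*>(calloc(1, len));

        sys_fields_t f;
        f.row_id = ptr;
        ptr += DATA_ROW_ID_LEN;

        f.trx_id = ptr;
        ptr += DATA_TRX_ID_LEN;

        f.roll_ptr = ptr;
        return f;
}

int main()
{
        sys_fields_t f = alloc_sys_fields();
        printf("row_id=%p trx_id=%p roll_ptr=%p\n",
               (void*) f.row_id, (void*) f.trx_id, (void*) f.roll_ptr);
        free(f.row_id);         /* the whole block starts at row_id */
        return 0;
}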
@@ -1743,12 +1745,11 @@ do_possible_lock_wait:
table case (check_ref == 0), since MDL lock will prevent
concurrent DDL and DML on the same table */
if (!check_ref) {
- for (const dict_foreign_t* check_foreign
- = UT_LIST_GET_FIRST( table->referenced_list);
- check_foreign;
- check_foreign = UT_LIST_GET_NEXT(
- referenced_list, check_foreign)) {
- if (check_foreign == foreign) {
+ for (dict_foreign_set::iterator it
+ = table->referenced_set.begin();
+ it != table->referenced_set.end();
+ ++it) {
+ if (*it == foreign) {
verified = true;
break;
}
@@ -1801,12 +1802,15 @@ row_ins_check_foreign_constraints(
trx = thr_get_trx(thr);
- foreign = UT_LIST_GET_FIRST(table->foreign_list);
-
DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd,
"foreign_constraint_check_for_ins");
- while (foreign) {
+ for (dict_foreign_set::iterator it = table->foreign_set.begin();
+ it != table->foreign_set.end();
+ ++it) {
+
+ foreign = *it;
+
if (foreign->foreign_index == index) {
dict_table_t* ref_table = NULL;
dict_table_t* foreign_table = foreign->foreign_table;
@@ -1862,8 +1866,6 @@ row_ins_check_foreign_constraints(
return(err);
}
}
-
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
}
return(DB_SUCCESS);
@@ -2913,7 +2915,7 @@ row_ins_clust_index_entry(
dberr_t err;
ulint n_uniq;
- if (UT_LIST_GET_FIRST(index->table->foreign_list)) {
+ if (!index->table->foreign_set.empty()) {
err = row_ins_check_foreign_constraints(
index->table, index, entry, thr);
if (err != DB_SUCCESS) {
@@ -2971,7 +2973,7 @@ row_ins_sec_index_entry(
mem_heap_t* offsets_heap;
mem_heap_t* heap;
- if (UT_LIST_GET_FIRST(index->table->foreign_list)) {
+ if (!index->table->foreign_set.empty()) {
err = row_ins_check_foreign_constraints(index->table, index,
entry, thr);
if (err != DB_SUCCESS) {
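[Reviewer note] Throughout this patch the intrusive UT_LIST of dict_foreign_t is replaced by a std::set (dict_foreign_set): emptiness checks become foreign_set.empty() and traversal becomes an iterator loop. A reduced sketch of the new access pattern follows; foreign_t and table_t are simplified stand-ins for dict_foreign_t and dict_table_t.

// Sketch of the UT_LIST -> std::set migration used in this patch.
#include <cstdio>
#include <set>
#include <string>

struct foreign_t {
        std::string id;                 /* constraint name */
        const void* foreign_index;      /* index the constraint hangs off */
};

typedef std::set<foreign_t*> foreign_set;

struct table_t {
        foreign_set foreign_set_;       /* FKs defined on this table */
        foreign_set referenced_set_;    /* FKs pointing at this table */
};

/* Check the constraints whose foreign index is the given index, mirroring
the shape of row_ins_check_foreign_constraints() after the patch. */
static int check_foreign_constraints(const table_t& table, const void* index)
{
        if (table.foreign_set_.empty()) {
                return 0;       /* nothing to do, like the empty() test above */
        }

        int checked = 0;
        for (foreign_set::const_iterator it = table.foreign_set_.begin();
             it != table.foreign_set_.end();
             ++it) {

                const foreign_t* foreign = *it;

                if (foreign->foreign_index == index) {
                        printf("checking constraint %s\n", foreign->id.c_str());
                        checked++;
                }
        }
        return checked;
}

int main()
{
        int idx_a, idx_b;
        foreign_t f1 = { "fk_1", &idx_a };
        foreign_t f2 = { "fk_2", &idx_b };

        table_t t;
        t.foreign_set_.insert(&f1);
        t.foreign_set_.insert(&f2);

        printf("%d constraint(s) checked\n",
               check_foreign_constraints(t, &idx_a));
        return 0;
}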
diff --git a/storage/xtradb/row/row0merge.cc b/storage/xtradb/row/row0merge.cc
index 84d6845363c..3dd1ac592fc 100644
--- a/storage/xtradb/row/row0merge.cc
+++ b/storage/xtradb/row/row0merge.cc
@@ -795,7 +795,7 @@ row_merge_read(
if (UNIV_UNLIKELY(!success)) {
ut_print_timestamp(stderr);
fprintf(stderr,
- " InnoDB: failed to read merge block at "UINT64PF"\n",
+ " InnoDB: failed to read merge block at " UINT64PF "\n",
ofs);
}
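[Reviewer note] The whitespace-only changes of the form "at "UINT64PF"\n" becoming "at " UINT64PF "\n" recur throughout the patch: C++11 reserves the literal-suffix syntax, so a macro glued directly onto a string literal is lexed as a user-defined literal suffix instead of being macro-expanded. A tiny reproduction follows; UINT64PF is defined locally for the demo (the real definition lives in univ.i and differs per platform).

// Why the " UINT64PF " spacing matters under -std=c++11.
#include <cinttypes>
#include <cstdio>

#define UINT64PF "%" PRIu64     /* illustrative; not the univ.i definition */

int main()
{
        uint64_t ofs = 12345678901234ULL;

        /* Pre-C++11 style, rejected by C++11 compilers:
        printf(" failed to read merge block at "UINT64PF"\n", ofs);
        */

        /* Portable form: adjacent string literals separated by spaces are
        concatenated after macro expansion. */
        printf(" failed to read merge block at " UINT64PF "\n", ofs);
        return 0;
}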
diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc
index 86de2eeb14c..8f270cfbfd0 100644
--- a/storage/xtradb/row/row0mysql.cc
+++ b/storage/xtradb/row/row0mysql.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2000, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2000, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -63,6 +63,7 @@ Created 9/17/2000 Heikki Tuuri
#include "m_string.h"
#include "my_sys.h"
#include "ha_prototypes.h"
+#include <algorithm>
/** Provide optional 4.x backwards compatibility for 5.0 and above */
UNIV_INTERN ibool row_rollback_on_timeout = FALSE;
@@ -1359,7 +1360,7 @@ error_exit:
if (doc_id < next_doc_id) {
fprintf(stderr,
"InnoDB: FTS Doc ID must be large than"
- " "UINT64PF" for table",
+ " " UINT64PF " for table",
next_doc_id - 1);
ut_print_name(stderr, trx, TRUE, table->name);
putc('\n', stderr);
@@ -1374,9 +1375,9 @@ error_exit:
if (doc_id - next_doc_id >= FTS_DOC_ID_MAX_STEP) {
fprintf(stderr,
- "InnoDB: Doc ID "UINT64PF" is too"
+ "InnoDB: Doc ID " UINT64PF " is too"
" big. Its difference with largest"
- " used Doc ID "UINT64PF" cannot"
+ " used Doc ID " UINT64PF " cannot"
" exceed or equal to %d\n",
doc_id, next_doc_id - 1,
FTS_DOC_ID_MAX_STEP);
@@ -1577,8 +1578,6 @@ init_fts_doc_id_for_ref(
{
dict_foreign_t* foreign;
- foreign = UT_LIST_GET_FIRST(table->referenced_list);
-
table->fk_max_recusive_level = 0;
(*depth)++;
@@ -1590,17 +1589,25 @@ init_fts_doc_id_for_ref(
/* Loop through this table's referenced list and also
recursively traverse each table's foreign table list */
- while (foreign && foreign->foreign_table) {
- if (foreign->foreign_table->fts) {
- fts_init_doc_id(foreign->foreign_table);
+ for (dict_foreign_set::iterator it = table->referenced_set.begin();
+ it != table->referenced_set.end();
+ ++it) {
+
+ foreign = *it;
+
+ if (foreign->foreign_table == NULL) {
+ break;
}
- if (UT_LIST_GET_LEN(foreign->foreign_table->referenced_list)
- > 0 && foreign->foreign_table != table) {
- init_fts_doc_id_for_ref(foreign->foreign_table, depth);
+ if (foreign->foreign_table->fts != NULL) {
+ fts_init_doc_id(foreign->foreign_table);
}
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
+ if (!foreign->foreign_table->referenced_set.empty()
+ && foreign->foreign_table != table) {
+ init_fts_doc_id_for_ref(
+ foreign->foreign_table, depth);
+ }
}
}
@@ -2840,43 +2847,47 @@ row_discard_tablespace_foreign_key_checks(
const trx_t* trx, /*!< in: transaction handle */
const dict_table_t* table) /*!< in: table to be discarded */
{
- const dict_foreign_t* foreign;
+
+ if (srv_read_only_mode || !trx->check_foreigns) {
+ return(DB_SUCCESS);
+ }
/* Check if the table is referenced by foreign key constraints from
some other table (not the table itself) */
+ dict_foreign_set::iterator it
+ = std::find_if(table->referenced_set.begin(),
+ table->referenced_set.end(),
+ dict_foreign_different_tables());
- for (foreign = UT_LIST_GET_FIRST(table->referenced_list);
- foreign && foreign->foreign_table == table;
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
-
+ if (it == table->referenced_set.end()) {
+ return(DB_SUCCESS);
}
- if (!srv_read_only_mode && foreign && trx->check_foreigns) {
+ const dict_foreign_t* foreign = *it;
+ FILE* ef = dict_foreign_err_file;
- FILE* ef = dict_foreign_err_file;
+ ut_ad(foreign->foreign_table != table);
+ ut_ad(foreign->referenced_table == table);
- /* We only allow discarding a referenced table if
- FOREIGN_KEY_CHECKS is set to 0 */
+ /* We only allow discarding a referenced table if
+ FOREIGN_KEY_CHECKS is set to 0 */
- mutex_enter(&dict_foreign_err_mutex);
+ mutex_enter(&dict_foreign_err_mutex);
- rewind(ef);
+ rewind(ef);
- ut_print_timestamp(ef);
+ ut_print_timestamp(ef);
- fputs(" Cannot DISCARD table ", ef);
- ut_print_name(stderr, trx, TRUE, table->name);
- fputs("\n"
- "because it is referenced by ", ef);
- ut_print_name(stderr, trx, TRUE, foreign->foreign_table_name);
- putc('\n', ef);
+ fputs(" Cannot DISCARD table ", ef);
+ ut_print_name(stderr, trx, TRUE, table->name);
+ fputs("\n"
+ "because it is referenced by ", ef);
+ ut_print_name(stderr, trx, TRUE, foreign->foreign_table_name);
+ putc('\n', ef);
- mutex_exit(&dict_foreign_err_mutex);
-
- return(DB_CANNOT_DROP_CONSTRAINT);
- }
+ mutex_exit(&dict_foreign_err_mutex);
- return(DB_SUCCESS);
+ return(DB_CANNOT_DROP_CONSTRAINT);
}
/*********************************************************************//**
@@ -3179,7 +3190,6 @@ row_truncate_table_for_mysql(
dict_table_t* table, /*!< in: table handle */
trx_t* trx) /*!< in: transaction handle */
{
- dict_foreign_t* foreign;
dberr_t err;
mem_heap_t* heap;
byte* buf;
@@ -3271,18 +3281,17 @@ row_truncate_table_for_mysql(
/* Check if the table is referenced by foreign key constraints from
some other table (not the table itself) */
- for (foreign = UT_LIST_GET_FIRST(table->referenced_list);
- foreign != 0 && foreign->foreign_table == table;
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
-
- /* Do nothing. */
- }
+ dict_foreign_set::iterator it
+ = std::find_if(table->referenced_set.begin(),
+ table->referenced_set.end(),
+ dict_foreign_different_tables());
if (!srv_read_only_mode
- && foreign
+ && it != table->referenced_set.end()
&& trx->check_foreigns) {
- FILE* ef = dict_foreign_err_file;
+ FILE* ef = dict_foreign_err_file;
+ dict_foreign_t* foreign = *it;
/* We only allow truncating a referenced table if
FOREIGN_KEY_CHECKS is set to 0 */
@@ -3885,42 +3894,45 @@ row_drop_table_for_mysql(
/* Check if the table is referenced by foreign key constraints from
some other table (not the table itself) */
- foreign = UT_LIST_GET_FIRST(table->referenced_list);
+ if (!srv_read_only_mode && trx->check_foreigns) {
- while (foreign && foreign->foreign_table == table) {
-check_next_foreign:
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
- }
+ for (dict_foreign_set::iterator it
+ = table->referenced_set.begin();
+ it != table->referenced_set.end();
+ ++it) {
- if (!srv_read_only_mode
- && foreign
- && trx->check_foreigns
- && !(drop_db && dict_tables_have_same_db(
- name, foreign->foreign_table_name_lookup))) {
- FILE* ef = dict_foreign_err_file;
+ foreign = *it;
- /* We only allow dropping a referenced table if
- FOREIGN_KEY_CHECKS is set to 0 */
+ const bool ref_ok = drop_db
+ && dict_tables_have_same_db(
+ name,
+ foreign->foreign_table_name_lookup);
- err = DB_CANNOT_DROP_CONSTRAINT;
+ if (foreign->foreign_table != table && !ref_ok) {
- mutex_enter(&dict_foreign_err_mutex);
- rewind(ef);
- ut_print_timestamp(ef);
+ FILE* ef = dict_foreign_err_file;
- fputs(" Cannot drop table ", ef);
- ut_print_name(ef, trx, TRUE, name);
- fputs("\n"
- "because it is referenced by ", ef);
- ut_print_name(ef, trx, TRUE, foreign->foreign_table_name);
- putc('\n', ef);
- mutex_exit(&dict_foreign_err_mutex);
+ /* We only allow dropping a referenced table
+ if FOREIGN_KEY_CHECKS is set to 0 */
- goto funct_exit;
- }
+ err = DB_CANNOT_DROP_CONSTRAINT;
+
+ mutex_enter(&dict_foreign_err_mutex);
+ rewind(ef);
+ ut_print_timestamp(ef);
- if (foreign && trx->check_foreigns) {
- goto check_next_foreign;
+ fputs(" Cannot drop table ", ef);
+ ut_print_name(ef, trx, TRUE, name);
+ fputs("\n"
+ "because it is referenced by ", ef);
+ ut_print_name(ef, trx, TRUE,
+ foreign->foreign_table_name);
+ putc('\n', ef);
+ mutex_exit(&dict_foreign_err_mutex);
+
+ goto funct_exit;
+ }
+ }
}
/* TODO: could we replace the counter n_foreign_key_checks_running
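[Reviewer note] Both row_discard_tablespace_foreign_key_checks() and row_truncate_table_for_mysql() now use std::find_if with the dict_foreign_different_tables predicate to answer "is this table referenced by some other table?". A reduced sketch of that predicate-based lookup follows; the types and the predicate body are simplified stand-ins written in the spirit of the dict0mem.h helpers, not copies of them.

// Sketch of the std::find_if + predicate pattern used above.
#include <algorithm>
#include <cstdio>
#include <set>

struct table_t;

struct foreign_t {
        const table_t* foreign_table;           /* child table */
        const table_t* referenced_table;        /* parent table */
};

typedef std::set<foreign_t*> foreign_set;

struct table_t {
        foreign_set referenced_set;     /* constraints pointing at this table */
};

/* Predicate in the spirit of dict_foreign_different_tables(): true when the
constraint's child table differs from its parent table. */
struct foreign_different_tables {
        bool operator()(const foreign_t* foreign) const
        {
                return foreign->foreign_table != foreign->referenced_table;
        }
};

static bool is_referenced_by_other_table(const table_t& table)
{
        foreign_set::const_iterator it
                = std::find_if(table.referenced_set.begin(),
                               table.referenced_set.end(),
                               foreign_different_tables());

        return it != table.referenced_set.end();
}

int main()
{
        table_t parent;
        table_t child;

        foreign_t self_ref  = { &parent, &parent };  /* self-referencing FK */
        foreign_t cross_ref = { &child,  &parent };  /* FK from another table */

        parent.referenced_set.insert(&self_ref);
        printf("referenced by another table: %s\n",
               is_referenced_by_other_table(parent) ? "yes" : "no");

        parent.referenced_set.insert(&cross_ref);
        printf("referenced by another table: %s\n",
               is_referenced_by_other_table(parent) ? "yes" : "no");
        return 0;
}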
diff --git a/storage/xtradb/row/row0sel.cc b/storage/xtradb/row/row0sel.cc
index 67107c34204..fd50e2240b5 100644
--- a/storage/xtradb/row/row0sel.cc
+++ b/storage/xtradb/row/row0sel.cc
@@ -878,16 +878,15 @@ row_sel_get_clust_rec(
if (!node->read_view) {
/* Try to place a lock on the index record */
-
- /* If innodb_locks_unsafe_for_binlog option is used
- or this session is using READ COMMITTED isolation level
- we lock only the record, i.e., next-key locking is
- not used. */
ulint lock_type;
trx_t* trx;
trx = thr_get_trx(thr);
+ /* If innodb_locks_unsafe_for_binlog option is used
+ or this session is using READ COMMITTED or lower isolation level
+ we lock only the record, i.e., next-key locking is
+ not used. */
if (srv_locks_unsafe_for_binlog
|| trx->isolation_level <= TRX_ISO_READ_COMMITTED) {
lock_type = LOCK_REC_NOT_GAP;
@@ -1505,12 +1504,6 @@ rec_loop:
search result set, resulting in the phantom problem. */
if (!consistent_read) {
-
- /* If innodb_locks_unsafe_for_binlog option is used
- or this session is using READ COMMITTED isolation
- level, we lock only the record, i.e., next-key
- locking is not used. */
-
rec_t* next_rec = page_rec_get_next(rec);
ulint lock_type;
trx_t* trx;
@@ -1520,6 +1513,10 @@ rec_loop:
offsets = rec_get_offsets(next_rec, index, offsets,
ULINT_UNDEFINED, &heap);
+ /* If innodb_locks_unsafe_for_binlog option is used
+ or this session is using READ COMMITTED or lower isolation
+ level, we lock only the record, i.e., next-key
+ locking is not used. */
if (srv_locks_unsafe_for_binlog
|| trx->isolation_level
<= TRX_ISO_READ_COMMITTED) {
@@ -1568,12 +1565,6 @@ skip_lock:
if (!consistent_read) {
/* Try to place a lock on the index record */
-
- /* If innodb_locks_unsafe_for_binlog option is used
- or this session is using READ COMMITTED isolation level,
- we lock only the record, i.e., next-key locking is
- not used. */
-
ulint lock_type;
trx_t* trx;
@@ -1582,6 +1573,10 @@ skip_lock:
trx = thr_get_trx(thr);
+ /* If innodb_locks_unsafe_for_binlog option is used
+ or this session is using READ COMMITTED or lower isolation level,
+ we lock only the record, i.e., next-key locking is
+ not used. */
if (srv_locks_unsafe_for_binlog
|| trx->isolation_level <= TRX_ISO_READ_COMMITTED) {
@@ -4228,7 +4223,7 @@ rec_loop:
/* Try to place a lock on the index record */
/* If innodb_locks_unsafe_for_binlog option is used
- or this session is using a READ COMMITTED isolation
+ or this session is using a READ COMMITTED or lower isolation
level we do not lock gaps. Supremum record is really
a gap and therefore we do not set locks there. */
@@ -4379,7 +4374,7 @@ wrong_offs:
/* Try to place a gap lock on the index
record only if innodb_locks_unsafe_for_binlog
option is not set or this session is not
- using a READ COMMITTED isolation level. */
+ using a READ COMMITTED or lower isolation level. */
err = sel_set_rec_lock(
btr_pcur_get_block(pcur),
@@ -4428,7 +4423,7 @@ wrong_offs:
/* Try to place a gap lock on the index
record only if innodb_locks_unsafe_for_binlog
option is not set or this session is not
- using a READ COMMITTED isolation level. */
+ using a READ COMMITTED or lower isolation level. */
err = sel_set_rec_lock(
btr_pcur_get_block(pcur),
diff --git a/storage/xtradb/row/row0upd.cc b/storage/xtradb/row/row0upd.cc
index a642f7932b7..ad6f10542cf 100644
--- a/storage/xtradb/row/row0upd.cc
+++ b/storage/xtradb/row/row0upd.cc
@@ -53,6 +53,7 @@ Created 12/27/1996 Heikki Tuuri
#include "pars0sym.h"
#include "eval0eval.h"
#include "buf0lru.h"
+#include <algorithm>
#include <mysql/plugin.h>
#include <mysql/service_wsrep.h>
@@ -140,12 +141,10 @@ row_upd_index_is_referenced(
trx_t* trx) /*!< in: transaction */
{
dict_table_t* table = index->table;
- dict_foreign_t* foreign;
ibool froze_data_dict = FALSE;
ibool is_referenced = FALSE;
- if (!UT_LIST_GET_FIRST(table->referenced_list)) {
-
+ if (table->referenced_set.empty()) {
return(FALSE);
}
@@ -154,19 +153,13 @@ row_upd_index_is_referenced(
froze_data_dict = TRUE;
}
- foreign = UT_LIST_GET_FIRST(table->referenced_list);
+ dict_foreign_set::iterator it
+ = std::find_if(table->referenced_set.begin(),
+ table->referenced_set.end(),
+ dict_foreign_with_index(index));
- while (foreign) {
- if (foreign->referenced_index == index) {
+ is_referenced = (it != table->referenced_set.end());
- is_referenced = TRUE;
- goto func_exit;
- }
-
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
- }
-
-func_exit:
if (froze_data_dict) {
row_mysql_unfreeze_data_dictionary(trx);
}
@@ -187,7 +180,7 @@ wsrep_row_upd_index_is_foreign(
ibool froze_data_dict = FALSE;
ibool is_referenced = FALSE;
- if (!UT_LIST_GET_FIRST(table->foreign_list)) {
+ if (table->foreign_set.empty()) {
return(FALSE);
}
@@ -197,16 +190,18 @@ wsrep_row_upd_index_is_foreign(
froze_data_dict = TRUE;
}
- foreign = UT_LIST_GET_FIRST(table->foreign_list);
+ for (dict_foreign_set::iterator it= table->foreign_set.begin();
+ it != table->foreign_set.end();
+ ++ it)
+ {
+ foreign= *it;
- while (foreign) {
if (foreign->foreign_index == index) {
is_referenced = TRUE;
goto func_exit;
}
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
}
func_exit:
@@ -248,7 +243,7 @@ row_upd_check_references_constraints(
dberr_t err;
ibool got_s_lock = FALSE;
- if (UT_LIST_GET_FIRST(table->referenced_list) == NULL) {
+ if (table->referenced_set.empty()) {
return(DB_SUCCESS);
}
@@ -275,9 +270,13 @@ row_upd_check_references_constraints(
}
run_again:
- foreign = UT_LIST_GET_FIRST(table->referenced_list);
- while (foreign) {
+ for (dict_foreign_set::iterator it = table->referenced_set.begin();
+ it != table->referenced_set.end();
+ ++it) {
+
+ foreign = *it;
+
/* Note that we may have an update which updates the index
record, but does NOT update the first fields which are
referenced in a foreign key constraint. Then the update does
@@ -330,8 +329,6 @@ run_again:
goto func_exit;
}
}
-
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
}
err = DB_SUCCESS;
@@ -368,7 +365,7 @@ wsrep_row_upd_check_foreign_constraints(
ibool got_s_lock = FALSE;
ibool opened = FALSE;
- if (UT_LIST_GET_FIRST(table->foreign_list) == NULL) {
+ if (table->foreign_set.empty()) {
return(DB_SUCCESS);
}
@@ -395,9 +392,13 @@ wsrep_row_upd_check_foreign_constraints(
row_mysql_freeze_data_dictionary(trx);
}
- foreign = UT_LIST_GET_FIRST(table->foreign_list);
+ for (dict_foreign_set::iterator it= table->foreign_set.begin();
+ it != table->foreign_set.end();
+ ++ it)
+ {
+ foreign= *it;
+
- while (foreign) {
/* Note that we may have an update which updates the index
record, but does NOT update the first fields which are
referenced in a foreign key constraint. Then the update does
@@ -448,7 +449,6 @@ wsrep_row_upd_check_foreign_constraints(
}
}
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
}
err = DB_SUCCESS;
diff --git a/storage/xtradb/srv/srv0mon.cc b/storage/xtradb/srv/srv0mon.cc
index f276efdc021..5880e03073e 100644
--- a/storage/xtradb/srv/srv0mon.cc
+++ b/storage/xtradb/srv/srv0mon.cc
@@ -41,8 +41,8 @@ Created 12/9/2009 Jimmy Yang
/* Macro to standardize the counter names for counters in the
"monitor_buf_page" module as they have very structured defines */
#define MONITOR_BUF_PAGE(name, description, code, op, op_code) \
- {"buffer_page_"op"_"name, "buffer_page_io", \
- "Number of "description" Pages "op, \
+ {"buffer_page_" op "_" name, "buffer_page_io", \
+ "Number of " description " Pages " op, \
MONITOR_GROUP_MODULE, MONITOR_DEFAULT_START, \
MONITOR_##code##_##op_code}
diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc
index a12a8b197fb..16df97edd64 100644
--- a/storage/xtradb/srv/srv0srv.cc
+++ b/storage/xtradb/srv/srv0srv.cc
@@ -515,7 +515,12 @@ UNIV_INTERN ulong srv_log_checksum_algorithm =
SRV_CHECKSUM_ALGORITHM_INNODB;
/*-------------------------------------------*/
+#ifdef HAVE_MEMORY_BARRIER
+/* No need to spin-wait for long when memory barriers are available */
+UNIV_INTERN ulong srv_n_spin_wait_rounds = 15;
+#else
UNIV_INTERN ulong srv_n_spin_wait_rounds = 30;
+#endif
UNIV_INTERN ulong srv_spin_wait_delay = 6;
UNIV_INTERN ibool srv_priority_boost = TRUE;
@@ -671,6 +676,9 @@ current_time % 5 != 0. */
? thd_lock_wait_timeout((trx)->mysql_thd) \
: 0)
+/** Simulate compression failures. */
+UNIV_INTERN uint srv_simulate_comp_failures = 0;
+
/*
IMPLEMENTATION OF THE SERVER MAIN PROGRAM
=========================================
@@ -798,7 +806,9 @@ static const ulint SRV_MASTER_SLOT = 0;
UNIV_INTERN os_event_t srv_checkpoint_completed_event;
-UNIV_INTERN os_event_t srv_redo_log_thread_finished_event;
+UNIV_INTERN os_event_t srv_redo_log_tracked_event;
+
+UNIV_INTERN bool srv_redo_log_thread_started = false;
/*********************************************************************//**
Prints counters for work done by srv_master_thread. */
@@ -1152,7 +1162,10 @@ srv_init(void)
srv_checkpoint_completed_event = os_event_create();
- srv_redo_log_thread_finished_event = os_event_create();
+ if (srv_track_changed_pages) {
+ srv_redo_log_tracked_event = os_event_create();
+ os_event_set(srv_redo_log_tracked_event);
+ }
UT_LIST_INIT(srv_sys->tasks);
}
@@ -2164,9 +2177,10 @@ loop:
/* Try to track a strange bug reported by Harald Fuchs and others,
where the lsn seems to decrease at times */
- new_lsn = log_get_lsn();
+ /* We have to use nowait to ensure we don't block */
+ new_lsn= log_get_lsn_nowait();
- if (new_lsn < old_lsn) {
+ if (new_lsn && new_lsn < old_lsn) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: old log sequence number " LSN_PF
@@ -2178,7 +2192,8 @@ loop:
ut_ad(0);
}
- old_lsn = new_lsn;
+ if (new_lsn)
+ old_lsn = new_lsn;
if (difftime(time(NULL), srv_last_monitor_time) > 60) {
/* We refresh InnoDB Monitor values so that averages are
@@ -2391,6 +2406,7 @@ DECLARE_THREAD(srv_redo_log_follow_thread)(
#endif
my_thread_init();
+ srv_redo_log_thread_started = true;
do {
os_event_wait(srv_checkpoint_completed_event);
@@ -2410,13 +2426,15 @@ DECLARE_THREAD(srv_redo_log_follow_thread)(
"stopping log tracking thread!\n");
break;
}
+ os_event_set(srv_redo_log_tracked_event);
}
} while (srv_shutdown_state < SRV_SHUTDOWN_LAST_PHASE);
srv_track_changed_pages = FALSE;
log_online_read_shutdown();
- os_event_set(srv_redo_log_thread_finished_event);
+ os_event_set(srv_redo_log_tracked_event);
+ srv_redo_log_thread_started = false; /* Defensive, not required */
my_thread_end();
os_thread_exit(NULL);
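[Reviewer note] srv_redo_log_thread_finished_event is replaced by srv_redo_log_tracked_event, which the follower thread sets after each tracked batch and once more on exit. The sketch below shows the general manual-reset event shape (set/reset/wait) on C++11 primitives; it is not the os0sync implementation, just the idea behind os_event_set()/os_event_wait() as used here.

// Rough sketch of a manual-reset "event" in the style of os_event_t.
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

class manual_event {
public:
        manual_event() : signalled_(false) {}

        void set()
        {
                std::lock_guard<std::mutex> lock(mutex_);
                signalled_ = true;
                cond_.notify_all();
        }

        void reset()
        {
                std::lock_guard<std::mutex> lock(mutex_);
                signalled_ = false;
        }

        void wait()
        {
                std::unique_lock<std::mutex> lock(mutex_);
                while (!signalled_) {
                        cond_.wait(lock);
                }
        }

private:
        std::mutex              mutex_;
        std::condition_variable cond_;
        bool                    signalled_;
};

int main()
{
        manual_event redo_log_tracked;

        std::thread tracker([&]() {
                /* pretend to follow the redo log for one batch */
                std::printf("tracker: batch done, signalling\n");
                redo_log_tracked.set();
        });

        redo_log_tracked.wait();
        std::printf("waiter: redo log is tracked up to the checkpoint\n");

        tracker.join();
        return 0;
}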
diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc
index 86b0764d948..86bc8ce041e 100644
--- a/storage/xtradb/srv/srv0start.cc
+++ b/storage/xtradb/srv/srv0start.cc
@@ -1576,6 +1576,7 @@ innobase_start_or_create_for_mysql(void)
char logfilename[10000];
char* logfile0 = NULL;
size_t dirnamelen;
+ bool sys_datafiles_created = false;
/* This should be initialized early */
ut_init_timer();
@@ -1725,6 +1726,19 @@ innobase_start_or_create_for_mysql(void)
"" IB_ATOMICS_STARTUP_MSG "");
ib_logf(IB_LOG_LEVEL_INFO,
+ "" IB_MEMORY_BARRIER_STARTUP_MSG "");
+
+#ifndef HAVE_MEMORY_BARRIER
+#if defined __i386__ || defined __x86_64__ || defined _M_IX86 || defined _M_X64 || defined __WIN__
+#else
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "MySQL was built without a memory barrier capability on this"
+ " architecture, which might allow a mutex/rw_lock violation"
+ " under high thread concurrency. This may cause a hang.");
+#endif /* IA32 or AMD64 */
+#endif /* HAVE_MEMORY_BARRIER */
+
+ ib_logf(IB_LOG_LEVEL_INFO,
"Compressed tables use zlib " ZLIB_VERSION
#ifdef UNIV_ZIP_DEBUG
" with validation"
@@ -2274,9 +2288,9 @@ innobase_start_or_create_for_mysql(void)
} else if (size != srv_log_file_size) {
ib_logf(IB_LOG_LEVEL_ERROR,
"Log file %s is"
- " of different size "UINT64PF" bytes"
+ " of different size " UINT64PF " bytes"
" than other log"
- " files "UINT64PF" bytes!",
+ " files " UINT64PF " bytes!",
logfilename,
size << UNIV_PAGE_SIZE_SHIFT,
(os_offset_t) srv_log_file_size
@@ -2528,6 +2542,15 @@ files_checked:
dict_check = DICT_CHECK_NONE_LOADED;
}
+ /* Create the SYS_TABLESPACES and SYS_DATAFILES system table */
+ err = dict_create_or_check_sys_tablespace();
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
+
+ sys_datafiles_created = true;
+
+ /* This function assumes that SYS_DATAFILES exists */
dict_check_tablespaces_and_store_max_id(dict_check);
}
@@ -2709,13 +2732,6 @@ files_checked:
srv_undo_logs = ULONG_UNDEFINED;
}
- /* Flush the changes made to TRX_SYS_PAGE by trx_sys_create_rsegs()*/
- if (!srv_force_recovery && !srv_read_only_mode) {
- bool success = buf_flush_list(ULINT_MAX, LSN_MAX, NULL);
- ut_a(success);
- buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST);
- }
-
if (!srv_read_only_mode) {
/* Create the thread which watches the timeouts
for lock waits */
@@ -2740,10 +2756,13 @@ files_checked:
return(err);
}
- /* Create the SYS_TABLESPACES system table */
- err = dict_create_or_check_sys_tablespace();
- if (err != DB_SUCCESS) {
- return(err);
+ /* Create the SYS_TABLESPACES and SYS_DATAFILES system tables if we
+ have not done that already on crash recovery. */
+ if (sys_datafiles_created == false) {
+ err = dict_create_or_check_sys_tablespace();
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
}
srv_is_being_started = FALSE;
diff --git a/storage/xtradb/sync/sync0arr.cc b/storage/xtradb/sync/sync0arr.cc
index 126cf8de0d5..7ad9fe8d40b 100644
--- a/storage/xtradb/sync/sync0arr.cc
+++ b/storage/xtradb/sync/sync0arr.cc
@@ -2,6 +2,7 @@
Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
+Copyright (c) 2013, 2014, SkySQL Ab. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -183,6 +184,33 @@ sync_array_get_nth_cell(
}
/******************************************************************//**
+Looks for a cell with the given thread id.
+@return pointer to cell or NULL if not found */
+static
+sync_cell_t*
+sync_array_find_thread(
+/*===================*/
+ sync_array_t* arr, /*!< in: wait array */
+ os_thread_id_t thread) /*!< in: thread id */
+{
+ ulint i;
+ sync_cell_t* cell;
+
+ for (i = 0; i < arr->n_cells; i++) {
+
+ cell = sync_array_get_nth_cell(arr, i);
+
+ if (cell->wait_object != NULL
+ && os_thread_eq(cell->thread, thread)) {
+
+ return(cell); /* Found */
+ }
+ }
+
+ return(NULL); /* Not found */
+}
+
+/******************************************************************//**
Reserves the mutex semaphore protecting a sync array. */
static
void
@@ -441,8 +469,10 @@ static
void
sync_array_cell_print(
/*==================*/
- FILE* file, /*!< in: file where to print */
- sync_cell_t* cell) /*!< in: sync cell */
+ FILE* file, /*!< in: file where to print */
+ sync_cell_t* cell, /*!< in: sync cell */
+ os_thread_id_t* reserver) /*!< out: write reserver or
+ 0 */
{
ib_mutex_t* mutex;
ib_prio_mutex_t* prio_mutex;
@@ -460,16 +490,9 @@ sync_array_cell_print(
innobase_basename(cell->file), (ulong) cell->line,
difftime(time(NULL), cell->reservation_time));
- /* If stacktrace feature is enabled we will send a SIGUSR2
- signal to thread waiting for the semaphore. Signal handler
- will then dump the current stack to error log. */
- if (srv_use_stacktrace) {
-#ifdef __linux__
- pthread_kill(cell->thread, SIGUSR2);
-#endif
- }
if (type == SYNC_MUTEX || type == SYNC_PRIO_MUTEX) {
+
/* We use old_wait_mutex in case the cell has already
been freed meanwhile */
if (type == SYNC_MUTEX) {
@@ -483,18 +506,29 @@ sync_array_cell_print(
}
- fprintf(file,
- "Mutex at %p '%s', lock var %lu\n"
+ if (mutex) {
+ fprintf(file,
+ "Mutex at %p '%s', lock var %lu\n"
#ifdef UNIV_SYNC_DEBUG
- "Last time reserved in file %s line %lu, "
+ "Last time reserved in file %s line %lu, "
#endif /* UNIV_SYNC_DEBUG */
- "waiters flag %lu\n",
- (void*) mutex, mutex->cmutex_name,
- (ulong) mutex->lock_word,
+ "waiters flag %lu\n",
+ (void*) mutex, mutex->cmutex_name,
+ (ulong) mutex->lock_word,
#ifdef UNIV_SYNC_DEBUG
- mutex->file_name, (ulong) mutex->line,
+ mutex->file_name, (ulong) mutex->line,
#endif /* UNIV_SYNC_DEBUG */
- (ulong) mutex->waiters);
+ (ulong) mutex->waiters);
+ }
+
+ /* If stacktrace feature is enabled we will send a SIGUSR2
+ signal to thread waiting for the semaphore. Signal handler
+ will then dump the current stack to error log. */
+ if (srv_use_stacktrace && cell && cell->thread) {
+#ifdef __linux__
+ pthread_kill(cell->thread, SIGUSR2);
+#endif
+ }
if (type == SYNC_PRIO_MUTEX) {
@@ -529,40 +563,47 @@ sync_array_cell_print(
rwlock = &prio_rwlock->base_lock;
}
- fprintf(file,
- " RW-latch at %p '%s'\n",
- (void*) rwlock, rwlock->lock_name);
- writer = rw_lock_get_writer(rwlock);
- if (writer != RW_LOCK_NOT_LOCKED) {
+ if (rwlock) {
fprintf(file,
- "a writer (thread id %lu) has"
- " reserved it in mode %s",
- (ulong) os_thread_pf(rwlock->writer_thread),
- writer == RW_LOCK_EX
- ? " exclusive\n"
- : " wait exclusive\n");
- }
+ " RW-latch at %p '%s'\n",
+ (void*) rwlock, rwlock->lock_name);
- fprintf(file,
- "number of readers %lu, waiters flag %lu, "
- "lock_word: %lx\n"
- "Last time read locked in file %s line %lu\n"
- "Last time write locked in file %s line %lu\n",
- (ulong) rw_lock_get_reader_count(rwlock),
- (ulong) rwlock->waiters,
- rwlock->lock_word,
- innobase_basename(rwlock->last_s_file_name),
- (ulong) rwlock->last_s_line,
- rwlock->last_x_file_name,
- (ulong) rwlock->last_x_line);
+ writer = rw_lock_get_writer(rwlock);
- /* If stacktrace feature is enabled we will send a SIGUSR2
- signal to thread that has locked RW-latch with write mode.
- Signal handler will then dump the current stack to error log. */
- if (writer != RW_LOCK_NOT_LOCKED && srv_use_stacktrace) {
+ if (writer && writer != RW_LOCK_NOT_LOCKED) {
+ fprintf(file,
+ "a writer (thread id %lu) has"
+ " reserved it in mode %s",
+ (ulong) os_thread_pf(rwlock->writer_thread),
+ writer == RW_LOCK_EX
+ ? " exclusive\n"
+ : " wait exclusive\n");
+
+ *reserver = rwlock->writer_thread;
+ }
+
+ fprintf(file,
+ "number of readers %lu, waiters flag %lu, "
+ "lock_word: %lx\n"
+ "Last time read locked in file %s line %lu\n"
+ "Last time write locked in file %s line %lu\n",
+ (ulong) rw_lock_get_reader_count(rwlock),
+ (ulong) rwlock->waiters,
+ rwlock->lock_word,
+ innobase_basename(rwlock->last_s_file_name),
+ (ulong) rwlock->last_s_line,
+ rwlock->last_x_file_name,
+ (ulong) rwlock->last_x_line);
+
+ /* If stacktrace feature is enabled we will send a SIGUSR2
+ signal to thread that has locked RW-latch with write mode.
+ Signal handler will then dump the current stack to error log. */
+ if (writer != RW_LOCK_NOT_LOCKED && srv_use_stacktrace &&
+ rwlock && rwlock->writer_thread) {
#ifdef __linux__
- pthread_kill(rwlock->writer_thread, SIGUSR2);
+ pthread_kill(rwlock->writer_thread, SIGUSR2);
#endif
+ }
}
if (prio_rwlock) {
@@ -584,32 +625,6 @@ sync_array_cell_print(
}
#ifdef UNIV_SYNC_DEBUG
-/******************************************************************//**
-Looks for a cell with the given thread id.
-@return pointer to cell or NULL if not found */
-static
-sync_cell_t*
-sync_array_find_thread(
-/*===================*/
- sync_array_t* arr, /*!< in: wait array */
- os_thread_id_t thread) /*!< in: thread id */
-{
- ulint i;
- sync_cell_t* cell;
-
- for (i = 0; i < arr->n_cells; i++) {
-
- cell = sync_array_get_nth_cell(arr, i);
-
- if (cell->wait_object != NULL
- && os_thread_eq(cell->thread, thread)) {
-
- return(cell); /* Found */
- }
- }
-
- return(NULL); /* Not found */
-}
/******************************************************************//**
Recursion step for deadlock detection.
@@ -671,6 +686,7 @@ sync_array_detect_deadlock(
os_thread_id_t thread;
ibool ret;
rw_lock_debug_t*debug;
+ os_thread_id_t r = 0;
ut_a(arr);
ut_a(start);
@@ -715,7 +731,7 @@ sync_array_detect_deadlock(
"Mutex %p owned by thread %lu file %s line %lu\n",
mutex, (ulong) os_thread_pf(mutex->thread_id),
mutex->file_name, (ulong) mutex->line);
- sync_array_cell_print(stderr, cell);
+ sync_array_cell_print(stderr, cell, &r);
return(TRUE);
}
@@ -754,7 +770,7 @@ sync_array_detect_deadlock(
print:
fprintf(stderr, "rw-lock %p ",
(void*) lock);
- sync_array_cell_print(stderr, cell);
+ sync_array_cell_print(stderr, cell, &r);
rw_lock_debug_print(stderr, debug);
return(TRUE);
}
@@ -823,6 +839,7 @@ sync_arr_cell_can_wake_up(
cell->wait_object))->base_mutex;
}
+ os_rmb;
if (mutex_get_lock_word(mutex) == 0) {
return(TRUE);
@@ -833,6 +850,7 @@ sync_arr_cell_can_wake_up(
lock = static_cast<rw_lock_t*>(cell->wait_object);
+ os_rmb;
if (lock->lock_word > 0) {
/* Either unlocked or only read locked. */
@@ -844,6 +862,7 @@ sync_arr_cell_can_wake_up(
lock = static_cast<rw_lock_t*>(cell->wait_object);
/* lock_word == 0 means all readers have left */
+ os_rmb;
if (lock->lock_word == 0) {
return(TRUE);
@@ -853,6 +872,7 @@ sync_arr_cell_can_wake_up(
lock = static_cast<rw_lock_t*>(cell->wait_object);
/* lock_word > 0 means no writer or reserved writer */
+ os_rmb;
if (lock->lock_word > 0) {
return(TRUE);
@@ -1009,6 +1029,7 @@ sync_array_print_long_waits_low(
double diff;
sync_cell_t* cell;
void* wait_object;
+ os_thread_id_t reserver=0;
cell = sync_array_get_nth_cell(arr, i);
@@ -1024,7 +1045,7 @@ sync_array_print_long_waits_low(
if (diff > SYNC_ARRAY_TIMEOUT) {
fputs("InnoDB: Warning: a long semaphore wait:\n",
stderr);
- sync_array_cell_print(stderr, cell);
+ sync_array_cell_print(stderr, cell, &reserver);
*noticed = TRUE;
}
@@ -1039,6 +1060,57 @@ sync_array_print_long_waits_low(
}
}
+ /* We found a long semaphore wait, print all threads that are
+ waiting for a semaphore. */
+ if (*noticed) {
+ for (i = 0; i < arr->n_cells; i++) {
+ void* wait_object;
+ sync_cell_t* cell;
+ os_thread_id_t reserver=(os_thread_id_t)ULINT_UNDEFINED;
+ ulint loop=0;
+
+ cell = sync_array_get_nth_cell(arr, i);
+
+ wait_object = cell->wait_object;
+
+ if (wait_object == NULL || !cell->waiting) {
+
+ continue;
+ }
+
+ fputs("InnoDB: Warning: semaphore wait:\n",
+ stderr);
+ sync_array_cell_print(stderr, cell, &reserver);
+
+ /* Try to output cell information for writer recursive way */
+ while (reserver != (os_thread_id_t)ULINT_UNDEFINED) {
+ sync_cell_t* reserver_wait;
+
+ reserver_wait = sync_array_find_thread(arr, reserver);
+
+ if (reserver_wait &&
+ reserver_wait->wait_object != NULL &&
+ reserver_wait->waiting) {
+ fputs("InnoDB: Warning: Writer thread is waiting this semaphore:\n",
+ stderr);
+ sync_array_cell_print(stderr, reserver_wait, &reserver);
+
+ if (reserver_wait->thread == reserver) {
+ reserver = (os_thread_id_t)ULINT_UNDEFINED;
+ }
+ } else {
+ reserver = (os_thread_id_t)ULINT_UNDEFINED;
+ }
+
+ /* Protection against an endless loop if the wait-for chain has a cycle */
+ if (loop++ > 100) {
+ fputs("InnoDB: Warning: Too many waiting threads.\n", stderr);
+ break;
+ }
+ }
+ }
+ }
+
#undef SYNC_ARRAY_TIMEOUT
return(fatal);
@@ -1125,12 +1197,13 @@ sync_array_print_info_low(
for (i = 0; count < arr->n_reserved; ++i) {
sync_cell_t* cell;
+ os_thread_id_t r = 0;
cell = sync_array_get_nth_cell(arr, i);
if (cell->wait_object != NULL) {
count++;
- sync_array_cell_print(file, cell);
+ sync_array_cell_print(file, cell, &r);
}
}
}
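[Reviewer note] sync_array_cell_print() now reports which thread holds the rw-lock a waiter is blocked on (the "reserver"), and sync_array_print_long_waits_low() follows that holder chain, capped at 100 hops, to show who is ultimately blocking whom. A simplified sketch of that chain walk over a plain waiter-to-holder map follows; it is only meant to illustrate the traversal and the hop-limit guard.

// Simplified sketch of the "follow the reserver" diagnostic added above.
#include <cstdio>
#include <map>

typedef unsigned long thread_id;

/* waiter -> thread currently holding the lock that the waiter wants */
typedef std::map<thread_id, thread_id> wait_for_map;

static void print_wait_chain(const wait_for_map& waits_for, thread_id start)
{
        thread_id current = start;

        for (unsigned hops = 0; hops < 10; hops++) {    /* the patch uses 100 */
                wait_for_map::const_iterator it = waits_for.find(current);

                if (it == waits_for.end()) {
                        printf("thread %lu is not waiting; chain ends here\n",
                               current);
                        return;
                }

                printf("thread %lu waits for thread %lu\n",
                       current, it->second);
                current = it->second;
        }

        /* Mirrors the "Too many waiting threads" guard in the patch. */
        printf("chain too long, giving up (possible cycle)\n");
}

int main()
{
        wait_for_map waits_for;
        waits_for[1] = 2;       /* thread 1 waits for thread 2 */
        waits_for[2] = 3;       /* thread 2 waits for thread 3 */
        waits_for[3] = 1;       /* thread 3 waits for thread 1: a cycle */

        print_wait_chain(waits_for, 1);
        return 0;
}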
diff --git a/storage/xtradb/sync/sync0rw.cc b/storage/xtradb/sync/sync0rw.cc
index 2ff75b55cf6..7fad78ea577 100644
--- a/storage/xtradb/sync/sync0rw.cc
+++ b/storage/xtradb/sync/sync0rw.cc
@@ -41,6 +41,7 @@ Created 9/11/1995 Heikki Tuuri
#include "srv0srv.h"
#include "os0sync.h" /* for INNODB_RW_LOCKS_USE_ATOMICS */
#include "ha_prototypes.h"
+#include "my_cpu.h"
/*
IMPLEMENTATION OF THE RW_LOCK
@@ -151,18 +152,12 @@ UNIV_INTERN mysql_pfs_key_t rw_lock_mutex_key;
To modify the debug info list of an rw-lock, this mutex has to be
acquired in addition to the mutex protecting the lock. */
-UNIV_INTERN ib_mutex_t rw_lock_debug_mutex;
+UNIV_INTERN os_fast_mutex_t rw_lock_debug_mutex;
# ifdef UNIV_PFS_MUTEX
UNIV_INTERN mysql_pfs_key_t rw_lock_debug_mutex_key;
# endif
-/* If deadlock detection does not get immediately the mutex,
-it may wait for this event */
-UNIV_INTERN os_event_t rw_lock_debug_event;
-/* This is set to TRUE, if there may be waiters for the event */
-UNIV_INTERN ibool rw_lock_debug_waiters;
-
/******************************************************************//**
Creates a debug info struct. */
static
@@ -454,6 +449,8 @@ lock_loop:
lock)) {
/* Spin waiting for the writer field to become free */
+ os_rmb;
+ HMT_low();
while (i < SYNC_SPIN_ROUNDS && lock->lock_word <= 0) {
if (srv_spin_wait_delay) {
ut_delay(ut_rnd_interval(0,
@@ -461,9 +458,11 @@ lock_loop:
}
i++;
+ os_rmb;
}
- if (i == SYNC_SPIN_ROUNDS) {
+ HMT_medium();
+ if (i >= SYNC_SPIN_ROUNDS) {
os_thread_yield();
}
@@ -609,16 +608,26 @@ rw_lock_x_lock_wait(
counter_index = (size_t) os_thread_get_curr_id();
+ os_rmb;
ut_ad(lock->lock_word <= 0);
+ HMT_low();
+ if (high_priority) {
+
+ prio_rw_lock = reinterpret_cast<prio_rw_lock_t *>(lock);
+ prio_rw_lock->high_priority_wait_ex_waiter = 1;
+ }
+
while (lock->lock_word < 0) {
if (srv_spin_wait_delay) {
ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
}
if(i < SYNC_SPIN_ROUNDS) {
i++;
+ os_rmb;
continue;
}
+ HMT_medium();
/* If there is still a reader, then go to sleep.*/
rw_lock_stats.rw_x_spin_round_count.add(counter_index, i);
@@ -628,13 +637,6 @@ rw_lock_x_lock_wait(
file_name,
line, &index);
- if (high_priority) {
-
- prio_rw_lock
- = reinterpret_cast<prio_rw_lock_t *>(lock);
- prio_rw_lock->high_priority_wait_ex_waiter = 1;
- }
-
i = 0;
/* Check lock_word to ensure wake-up isn't missed.*/
@@ -661,12 +663,16 @@ rw_lock_x_lock_wait(
We must pass the while-loop check to proceed.*/
} else {
sync_array_free_cell(sync_arr, index);
- if (prio_rw_lock) {
-
- prio_rw_lock->high_priority_wait_ex_waiter = 0;
- }
}
+ HMT_low();
}
+ HMT_medium();
+
+ if (prio_rw_lock) {
+
+ prio_rw_lock->high_priority_wait_ex_waiter = 0;
+ }
+
rw_lock_stats.rw_x_spin_round_count.add(counter_index, i);
}
@@ -708,6 +714,10 @@ rw_lock_x_lock_low(
} else {
os_thread_id_t thread_id = os_thread_get_curr_id();
+ if (!pass) {
+ os_rmb;
+ }
+
/* Decrement failed: relock or failed lock */
if (!pass && lock->recursive
&& os_thread_eq(lock->writer_thread, thread_id)) {
@@ -798,6 +808,8 @@ lock_loop:
}
/* Spin waiting for the lock_word to become free */
+ os_rmb;
+ HMT_low();
while (i < SYNC_SPIN_ROUNDS
&& lock->lock_word <= 0) {
if (srv_spin_wait_delay) {
@@ -806,8 +818,10 @@ lock_loop:
}
i++;
+ os_rmb;
}
- if (i == SYNC_SPIN_ROUNDS) {
+ HMT_medium();
+ if (i >= SYNC_SPIN_ROUNDS) {
os_thread_yield();
} else {
goto lock_loop;
@@ -920,22 +934,7 @@ void
rw_lock_debug_mutex_enter(void)
/*===========================*/
{
-loop:
- if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) {
- return;
- }
-
- os_event_reset(rw_lock_debug_event);
-
- rw_lock_debug_waiters = TRUE;
-
- if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) {
- return;
- }
-
- os_event_wait(rw_lock_debug_event);
-
- goto loop;
+ os_fast_mutex_lock(&rw_lock_debug_mutex);
}
/******************************************************************//**
@@ -945,12 +944,7 @@ void
rw_lock_debug_mutex_exit(void)
/*==========================*/
{
- mutex_exit(&rw_lock_debug_mutex);
-
- if (rw_lock_debug_waiters) {
- rw_lock_debug_waiters = FALSE;
- os_event_set(rw_lock_debug_event);
- }
+ os_fast_mutex_unlock(&rw_lock_debug_mutex);
}
/******************************************************************//**
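[Reviewer note] rw_lock_debug_mutex_enter()/exit() previously emulated blocking with mutex_enter_nowait() plus an event and a waiters flag; the patch replaces all of that with an OS mutex, so enter/exit degenerate to one lock/unlock call each. The sketch below shows that simplified wrapper directly on pthreads (os_fast_mutex_t is essentially a pthread mutex on POSIX builds); it is a sketch of the pattern, not the sync0rw code.

// Sketch of the simplified debug-mutex wrapper after the patch.
#include <cstdio>
#include <pthread.h>

static pthread_mutex_t rw_lock_debug_mutex = PTHREAD_MUTEX_INITIALIZER;

static void rw_lock_debug_mutex_enter()
{
        pthread_mutex_lock(&rw_lock_debug_mutex);
}

static void rw_lock_debug_mutex_exit()
{
        pthread_mutex_unlock(&rw_lock_debug_mutex);
}

static void* worker(void* arg)
{
        const char* name = static_cast<const char*>(arg);

        rw_lock_debug_mutex_enter();
        printf("%s updated the rw-lock debug info list\n", name);
        rw_lock_debug_mutex_exit();
        return NULL;
}

int main()
{
        pthread_t t1, t2;
        pthread_create(&t1, NULL, worker, (void*) "thread 1");
        pthread_create(&t2, NULL, worker, (void*) "thread 2");
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}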
diff --git a/storage/xtradb/sync/sync0sync.cc b/storage/xtradb/sync/sync0sync.cc
index 1c5b144eb24..46343a71d28 100644
--- a/storage/xtradb/sync/sync0sync.cc
+++ b/storage/xtradb/sync/sync0sync.cc
@@ -46,6 +46,7 @@ Created 9/5/1995 Heikki Tuuri
# include "srv0start.h" /* srv_is_being_started */
#endif /* UNIV_SYNC_DEBUG */
#include "ha_prototypes.h"
+#include "my_cpu.h"
/*
REASONS FOR IMPLEMENTING THE SPIN LOCK MUTEX
@@ -535,6 +536,8 @@ mutex_set_waiters(
ptr = &(mutex->waiters);
+ os_wmb;
+
*ptr = n; /* Here we assume that the write of a single
word in memory is atomic */
}
@@ -587,15 +590,17 @@ mutex_loop:
spin_loop:
+ HMT_low();
+ os_rmb;
while (mutex_get_lock_word(mutex) != 0 && i < SYNC_SPIN_ROUNDS) {
if (srv_spin_wait_delay) {
ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
}
-
i++;
}
+ HMT_medium();
- if (i == SYNC_SPIN_ROUNDS) {
+ if (i >= SYNC_SPIN_ROUNDS) {
os_thread_yield();
}
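[Reviewer note] The mutex spin loop now re-reads the lock word behind an explicit os_rmb, brackets the spinning with HMT_low()/HMT_medium() (which, as I understand my_cpu.h, lower and restore hardware thread priority on POWER), and compares with >= instead of ==, presumably because srv_n_spin_wait_rounds can be changed at runtime while a thread is spinning. A standalone sketch of the spin-then-yield shape follows; std::atomic loads stand in for the barrier macros and the priority hints are omitted.

// Standalone sketch of the spin-then-yield loop shape used above.
#include <atomic>
#include <cstdio>
#include <thread>

static const unsigned SPIN_ROUNDS = 30;         /* like SYNC_SPIN_ROUNDS */

static std::atomic<int> lock_word(1);           /* 1 = locked, 0 = free */

static void wait_until_free()
{
        for (;;) {
                unsigned i = 0;

                /* Spin while the lock looks taken and rounds remain. */
                while (lock_word.load(std::memory_order_acquire) != 0
                       && i < SPIN_ROUNDS) {
                        i++;
                }

                if (lock_word.load(std::memory_order_acquire) == 0) {
                        return;
                }

                /* ">=" rather than "==": the configured round count may have
                shrunk while we were spinning. */
                if (i >= SPIN_ROUNDS) {
                        std::this_thread::yield();
                }
        }
}

int main()
{
        std::thread holder([]() {
                lock_word.store(0, std::memory_order_release); /* release it */
        });

        wait_until_free();
        holder.join();

        printf("lock word is now %d\n", lock_word.load());
        return 0;
}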
@@ -1599,11 +1604,7 @@ sync_init(void)
SYNC_NO_ORDER_CHECK);
#ifdef UNIV_SYNC_DEBUG
- mutex_create(rw_lock_debug_mutex_key, &rw_lock_debug_mutex,
- SYNC_NO_ORDER_CHECK);
-
- rw_lock_debug_event = os_event_create();
- rw_lock_debug_waiters = FALSE;
+ os_fast_mutex_init(rw_lock_debug_mutex_key, &rw_lock_debug_mutex);
#endif /* UNIV_SYNC_DEBUG */
}
@@ -1677,6 +1678,7 @@ sync_close(void)
sync_order_checks_on = FALSE;
sync_thread_level_arrays_free();
+ os_fast_mutex_free(&rw_lock_debug_mutex);
#endif /* UNIV_SYNC_DEBUG */
sync_initialized = FALSE;
@@ -1691,12 +1693,12 @@ sync_print_wait_info(
FILE* file) /*!< in: file where to print */
{
fprintf(file,
- "Mutex spin waits "UINT64PF", rounds "UINT64PF", "
- "OS waits "UINT64PF"\n"
- "RW-shared spins "UINT64PF", rounds "UINT64PF", "
- "OS waits "UINT64PF"\n"
- "RW-excl spins "UINT64PF", rounds "UINT64PF", "
- "OS waits "UINT64PF"\n",
+ "Mutex spin waits " UINT64PF ", rounds " UINT64PF ", "
+ "OS waits " UINT64PF "\n"
+ "RW-shared spins " UINT64PF ", rounds " UINT64PF ", "
+ "OS waits " UINT64PF "\n"
+ "RW-excl spins " UINT64PF ", rounds " UINT64PF ", "
+ "OS waits " UINT64PF "\n",
(ib_uint64_t) mutex_spin_wait_count,
(ib_uint64_t) mutex_spin_round_count,
(ib_uint64_t) mutex_os_wait_count,
diff --git a/storage/xtradb/trx/trx0i_s.cc b/storage/xtradb/trx/trx0i_s.cc
index f5d4a6c862f..794ee432ca4 100644
--- a/storage/xtradb/trx/trx0i_s.cc
+++ b/storage/xtradb/trx/trx0i_s.cc
@@ -1653,7 +1653,7 @@ trx_i_s_create_lock_id(
} else {
/* table lock */
res_len = ut_snprintf(lock_id, lock_id_size,
- TRX_ID_FMT":"UINT64PF,
+ TRX_ID_FMT":" UINT64PF,
row->lock_trx_id,
row->lock_table_id);
}
diff --git a/storage/xtradb/trx/trx0sys.cc b/storage/xtradb/trx/trx0sys.cc
index 9b59ae14278..32948d6847c 100644
--- a/storage/xtradb/trx/trx0sys.cc
+++ b/storage/xtradb/trx/trx0sys.cc
@@ -1097,7 +1097,7 @@ trx_sys_print_mysql_binlog_offset_from_page(
== TRX_SYS_MYSQL_LOG_MAGIC_N) {
fprintf(stderr,
- "ibbackup: Last MySQL binlog file position %lu %lu,"
+ "mysqlbackup: Last MySQL binlog file position %lu %lu,"
" file name %s\n",
(ulong) mach_read_from_4(
sys_header + TRX_SYS_MYSQL_LOG_INFO
@@ -1148,9 +1148,9 @@ trx_sys_read_file_format_id(
ut_print_timestamp(stderr);
fprintf(stderr,
- " ibbackup: Error: trying to read system tablespace "
- "file format,\n"
- " ibbackup: but could not open the tablespace "
+ " mysqlbackup: Error: trying to read system "
+ "tablespace file format,\n"
+ " mysqlbackup: but could not open the tablespace "
"file %s!\n", pathname);
return(FALSE);
}
@@ -1167,9 +1167,9 @@ trx_sys_read_file_format_id(
ut_print_timestamp(stderr);
fprintf(stderr,
- " ibbackup: Error: trying to read system tablespace "
- "file format,\n"
- " ibbackup: but failed to read the tablespace "
+ " mysqlbackup: Error: trying to read system "
+ "tablespace file format,\n"
+ " mysqlbackup: but failed to read the tablespace "
"file %s!\n", pathname);
os_file_close(file);
@@ -1228,9 +1228,9 @@ trx_sys_read_pertable_file_format_id(
ut_print_timestamp(stderr);
fprintf(stderr,
- " ibbackup: Error: trying to read per-table "
+ " mysqlbackup: Error: trying to read per-table "
"tablespace format,\n"
- " ibbackup: but could not open the tablespace "
+ " mysqlbackup: but could not open the tablespace "
"file %s!\n", pathname);
return(FALSE);
@@ -1247,9 +1247,9 @@ trx_sys_read_pertable_file_format_id(
ut_print_timestamp(stderr);
fprintf(stderr,
- " ibbackup: Error: trying to per-table data file "
+ " mysqlbackup: Error: trying to per-table data file "
"format,\n"
- " ibbackup: but failed to read the tablespace "
+ " mysqlbackup: but failed to read the tablespace "
"file %s!\n", pathname);
os_file_close(file);
diff --git a/storage/xtradb/trx/trx0trx.cc b/storage/xtradb/trx/trx0trx.cc
index 30d4b7f6546..12f680bd7e2 100644
--- a/storage/xtradb/trx/trx0trx.cc
+++ b/storage/xtradb/trx/trx0trx.cc
@@ -53,6 +53,9 @@ Created 3/26/1996 Heikki Tuuri
#include<set>
+extern "C"
+int thd_deadlock_victim_preference(const MYSQL_THD thd1, const MYSQL_THD thd2);
+
/** Set of table_id */
typedef std::set<table_id_t> table_id_set;
@@ -1725,6 +1728,38 @@ trx_assign_read_view(
return(trx->read_view);
}
+/********************************************************************//**
+Clones the read view from another transaction. All consistent reads within
+the receiver transaction will get the same read view as the donor transaction
+@return read view clone */
+UNIV_INTERN
+read_view_t*
+trx_clone_read_view(
+/*================*/
+ trx_t* trx, /*!< in: receiver transaction */
+ trx_t* from_trx) /*!< in: donor transaction */
+{
+ ut_ad(lock_mutex_own());
+ ut_ad(mutex_own(&trx_sys->mutex));
+ ut_ad(trx_mutex_own(from_trx));
+ ut_ad(trx->read_view == NULL);
+
+ if (from_trx->state != TRX_STATE_ACTIVE ||
+ from_trx->read_view == NULL) {
+
+ return(NULL);
+ }
+
+ trx->read_view = read_view_clone(from_trx->read_view,
+ trx->prebuilt_view);
+
+ read_view_add(trx->read_view);
+
+ trx->global_read_view = trx->read_view;
+
+ return(trx->read_view);
+}
+
/****************************************************************//**
Prepares a transaction for commit/rollback. */
UNIV_INTERN
@@ -2071,7 +2106,7 @@ state_ok:
if (trx->undo_no != 0) {
newline = TRUE;
- fprintf(f, ", undo log entries "TRX_ID_FMT, trx->undo_no);
+ fprintf(f, ", undo log entries " TRX_ID_FMT, trx->undo_no);
}
if (newline) {
@@ -2174,9 +2209,8 @@ trx_assert_started(
#endif /* UNIV_DEBUG */
/*******************************************************************//**
-Compares the "weight" (or size) of two transactions. Transactions that
-have edited non-transactional tables are considered heavier than ones
-that have not.
+Compares the "weight" (or size) of two transactions. The heavier the weight,
+the more reluctant we will be to choose the transaction as a deadlock victim.
@return TRUE if weight(a) >= weight(b) */
UNIV_INTERN
ibool
@@ -2185,26 +2219,19 @@ trx_weight_ge(
const trx_t* a, /*!< in: the first transaction to be compared */
const trx_t* b) /*!< in: the second transaction to be compared */
{
- ibool a_notrans_edit;
- ibool b_notrans_edit;
-
- /* If mysql_thd is NULL for a transaction we assume that it has
- not edited non-transactional tables. */
-
- a_notrans_edit = a->mysql_thd != NULL
- && thd_has_edited_nontrans_tables(a->mysql_thd);
-
- b_notrans_edit = b->mysql_thd != NULL
- && thd_has_edited_nontrans_tables(b->mysql_thd);
-
- if (a_notrans_edit != b_notrans_edit) {
+ int pref;
- return(a_notrans_edit);
+ /* First ask the upper server layer if it has any preference for which
+ to prefer as a deadlock victim. */
+ pref= thd_deadlock_victim_preference(a->mysql_thd, b->mysql_thd);
+ if (pref < 0) {
+ return FALSE;
+ } else if (pref > 0) {
+ return TRUE;
}
- /* Either both had edited non-transactional tables or both had
- not, we fall back to comparing the number of altered/locked
- rows. */
+ /* Upper server layer had no preference, we fall back to comparing the
+ number of altered/locked rows. */
#if 0
fprintf(stderr,
@@ -2371,7 +2398,7 @@ trx_recover_for_mysql(
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Transaction contains changes"
- " to "TRX_ID_FMT" rows\n",
+ " to " TRX_ID_FMT " rows\n",
trx->undo_no);
count++;
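[Reviewer note] trx_weight_ge() now first asks the server layer, via thd_deadlock_victim_preference(), which transaction it would rather sacrifice, and only falls back to comparing the work done when the upper layer has no opinion. The sketch below reproduces that two-stage comparison; deadlock_victim_preference() is a stub whose sign convention is read off how the hunk uses the return value, and the priority field is an invented illustration of an upper-layer preference.

// Reduced sketch of the two-stage weighting in trx_weight_ge() above.
#include <cstdio>

struct trx_sketch {
        const char*   name;
        int           priority;         /* upper-layer preference input */
        unsigned long rows_changed;     /* undo records + locked rows */
};

/* < 0: choose a as the deadlock victim, > 0: choose b, 0: no preference */
static int deadlock_victim_preference(const trx_sketch* a, const trx_sketch* b)
{
        if (a->priority == b->priority) {
                return 0;
        }
        /* Victimize the lower-priority transaction. */
        return a->priority < b->priority ? -1 : 1;
}

/* true when weight(a) >= weight(b); the heavier transaction survives. */
static bool trx_weight_ge_sketch(const trx_sketch* a, const trx_sketch* b)
{
        int pref = deadlock_victim_preference(a, b);

        if (pref < 0) {
                return false;
        } else if (pref > 0) {
                return true;
        }

        /* No preference from the upper layer: compare the work done. */
        return a->rows_changed >= b->rows_changed;
}

int main()
{
        trx_sketch small_but_important = { "trx-1", 1, 10 };
        trx_sketch big_but_ordinary    = { "trx-2", 0, 100000 };

        printf("%s is %s\n", small_but_important.name,
               trx_weight_ge_sketch(&small_but_important, &big_but_ordinary)
               ? "kept (heavier)" : "the deadlock victim");
        return 0;
}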