author     Marko Mäkelä <marko.makela@mariadb.com>  2019-11-14 14:49:20 +0200
committer  Marko Mäkelä <marko.makela@mariadb.com>  2019-11-14 14:49:20 +0200
commit     ae90f8431ba80383f75810248ddaaa9d2c6fd09f (patch)
tree       8f4e6f29d991188b01db6695c3229f6201cc6121 /storage/innobase
parent     c454b8964c10301dceab6d1a5489350a0f8fbf9c (diff)
parent     89ae01fd0085cf0d1af272eca545e49fdadf4538 (diff)
Merge 10.4 into 10.5
Diffstat (limited to 'storage/innobase')
-rw-r--r--  storage/innobase/btr/btr0btr.cc            |  18
-rw-r--r--  storage/innobase/btr/btr0bulk.cc           |   2
-rw-r--r--  storage/innobase/btr/btr0cur.cc            |  30
-rw-r--r--  storage/innobase/btr/btr0defragment.cc     |   2
-rw-r--r--  storage/innobase/btr/btr0scrub.cc          |   2
-rw-r--r--  storage/innobase/dict/dict0crea.cc         |   5
-rw-r--r--  storage/innobase/dict/dict0defrag_bg.cc    |   6
-rw-r--r--  storage/innobase/dict/dict0dict.cc         | 198
-rw-r--r--  storage/innobase/dict/dict0stats.cc        |  32
-rw-r--r--  storage/innobase/fsp/fsp0fsp.cc            |  10
-rw-r--r--  storage/innobase/gis/gis0rtree.cc          |   2
-rw-r--r--  storage/innobase/gis/gis0sea.cc            |   2
-rw-r--r--  storage/innobase/handler/ha_innodb.cc      | 264
-rw-r--r--  storage/innobase/handler/ha_innodb.h       |   7
-rw-r--r--  storage/innobase/handler/handler0alter.cc  |  18
-rw-r--r--  storage/innobase/ibuf/ibuf0ibuf.cc         |   8
-rw-r--r--  storage/innobase/include/dict0dict.h       |   6
-rw-r--r--  storage/innobase/include/dict0mem.h        |  66
-rw-r--r--  storage/innobase/include/mtr0mtr.h         |  89
-rw-r--r--  storage/innobase/include/mtr0mtr.ic        |  33
-rw-r--r--  storage/innobase/include/page0zip.ic       |   3
-rw-r--r--  storage/innobase/mtr/mtr0mtr.cc            | 202
-rw-r--r--  storage/innobase/row/row0ins.cc            |  10
-rw-r--r--  storage/innobase/row/row0mysql.cc          |   3
-rw-r--r--  storage/innobase/row/row0purge.cc          |  24
-rw-r--r--  storage/innobase/row/row0uins.cc           |   6
-rw-r--r--  storage/innobase/row/row0umod.cc           |  14
-rw-r--r--  storage/innobase/row/row0upd.cc            |   4
-rw-r--r--  storage/innobase/trx/trx0purge.cc          |   2
-rw-r--r--  storage/innobase/trx/trx0rseg.cc           |   2
-rw-r--r--  storage/innobase/trx/trx0sys.cc            |   2
31 files changed, 542 insertions, 530 deletions
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index 7b871a2414a..748efed022c 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -4805,13 +4805,13 @@ btr_validate_level(
ulint parent_right_page_no = FIL_NULL;
bool rightmost_child = false;
- mtr_start(&mtr);
+ mtr.start();
if (!srv_read_only_mode) {
if (lockout) {
- mtr_x_lock(dict_index_get_lock(index), &mtr);
+ mtr_x_lock_index(index, &mtr);
} else {
- mtr_sx_lock(dict_index_get_lock(index), &mtr);
+ mtr_sx_lock_index(index, &mtr);
}
}
@@ -4885,9 +4885,9 @@ loop:
offsets = offsets2 = NULL;
if (!srv_read_only_mode) {
if (lockout) {
- mtr_x_lock(dict_index_get_lock(index), &mtr);
+ mtr_x_lock_index(index, &mtr);
} else {
- mtr_sx_lock(dict_index_get_lock(index), &mtr);
+ mtr_sx_lock_index(index, &mtr);
}
}
@@ -5177,13 +5177,13 @@ node_ptr_fails:
/* Commit the mini-transaction to release the latch on 'page'.
Re-acquire the latch on right_page, which will become 'page'
on the next loop. The page has already been checked. */
- mtr_commit(&mtr);
+ mtr.commit();
if (trx_is_interrupted(trx)) {
/* On interrupt, return the current status. */
} else if (right_page_no != FIL_NULL) {
- mtr_start(&mtr);
+ mtr.start();
if (!lockout) {
if (rightmost_child) {
@@ -5235,9 +5235,9 @@ btr_validate_index(
if (!srv_read_only_mode) {
if (lockout) {
- mtr_x_lock(dict_index_get_lock(index), &mtr);
+ mtr_x_lock_index(index, &mtr);
} else {
- mtr_sx_lock(dict_index_get_lock(index), &mtr);
+ mtr_sx_lock_index(index, &mtr);
}
}
diff --git a/storage/innobase/btr/btr0bulk.cc b/storage/innobase/btr/btr0bulk.cc
index 2c36db7cab5..dd691000b79 100644
--- a/storage/innobase/btr/btr0bulk.cc
+++ b/storage/innobase/btr/btr0bulk.cc
@@ -1012,7 +1012,7 @@ BtrBulk::finish(dberr_t err)
mtr.start();
m_index->set_modified(mtr);
- mtr_x_lock(&m_index->lock, &mtr);
+ mtr_x_lock_index(m_index, &mtr);
ut_ad(last_page_no != FIL_NULL);
last_block = btr_block_get(*m_index, last_page_no, RW_X_LATCH,
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 9bf88f66889..59f436605fc 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -1398,16 +1398,16 @@ btr_cur_search_to_nth_level_func(
if (lock_intention == BTR_INTENTION_DELETE
&& trx_sys.rseg_history_len > BTR_CUR_FINE_HISTORY_LENGTH
&& buf_get_n_pending_read_ios()) {
- mtr_x_lock(dict_index_get_lock(index), mtr);
- } else if (dict_index_is_spatial(index)
+x_latch_index:
+ mtr_x_lock_index(index, mtr);
+ } else if (index->is_spatial()
&& lock_intention <= BTR_INTENTION_BOTH) {
/* X lock the if there is possibility of
pessimistic delete on spatial index. As we could
lock upward for the tree */
-
- mtr_x_lock(dict_index_get_lock(index), mtr);
+ goto x_latch_index;
} else {
- mtr_sx_lock(dict_index_get_lock(index), mtr);
+ mtr_sx_lock_index(index, mtr);
}
upper_rw_latch = RW_X_LATCH;
break;
@@ -1439,10 +1439,10 @@ btr_cur_search_to_nth_level_func(
BTR_ALREADY_S_LATCHED */
ut_ad(latch_mode != BTR_SEARCH_TREE);
- mtr_s_lock(dict_index_get_lock(index), mtr);
+ mtr_s_lock_index(index, mtr);
} else {
/* BTR_MODIFY_EXTERNAL needs to be excluded */
- mtr_sx_lock(dict_index_get_lock(index), mtr);
+ mtr_sx_lock_index(index, mtr);
}
upper_rw_latch = RW_S_LATCH;
} else {
@@ -2526,9 +2526,9 @@ btr_cur_open_at_index_side_func(
if (lock_intention == BTR_INTENTION_DELETE
&& trx_sys.rseg_history_len > BTR_CUR_FINE_HISTORY_LENGTH
&& buf_get_n_pending_read_ios()) {
- mtr_x_lock(dict_index_get_lock(index), mtr);
+ mtr_x_lock_index(index, mtr);
} else {
- mtr_sx_lock(dict_index_get_lock(index), mtr);
+ mtr_sx_lock_index(index, mtr);
}
upper_rw_latch = RW_X_LATCH;
break;
@@ -2544,7 +2544,7 @@ btr_cur_open_at_index_side_func(
BTR_ALREADY_S_LATCHED */
ut_ad(latch_mode != BTR_SEARCH_TREE);
- mtr_s_lock(dict_index_get_lock(index), mtr);
+ mtr_s_lock_index(index, mtr);
}
upper_rw_latch = RW_S_LATCH;
} else {
@@ -2835,7 +2835,7 @@ btr_cur_open_at_rnd_pos_func(
ulint* offsets = offsets_;
rec_offs_init(offsets_);
- ut_ad(!dict_index_is_spatial(index));
+ ut_ad(!index->is_spatial());
lock_intention = btr_cur_get_and_clear_intention(&latch_mode);
@@ -2853,9 +2853,9 @@ btr_cur_open_at_rnd_pos_func(
if (lock_intention == BTR_INTENTION_DELETE
&& trx_sys.rseg_history_len > BTR_CUR_FINE_HISTORY_LENGTH
&& buf_get_n_pending_read_ios()) {
- mtr_x_lock(dict_index_get_lock(index), mtr);
+ mtr_x_lock_index(index, mtr);
} else {
- mtr_sx_lock(dict_index_get_lock(index), mtr);
+ mtr_sx_lock_index(index, mtr);
}
upper_rw_latch = RW_X_LATCH;
break;
@@ -2871,7 +2871,7 @@ btr_cur_open_at_rnd_pos_func(
/* fall through */
default:
if (!srv_read_only_mode) {
- mtr_s_lock(dict_index_get_lock(index), mtr);
+ mtr_s_lock_index(index, mtr);
upper_rw_latch = RW_S_LATCH;
} else {
upper_rw_latch = RW_NO_LATCH;
@@ -5094,7 +5094,7 @@ btr_cur_pessimistic_update(
MTR_MEMO_X_LOCK |
MTR_MEMO_SX_LOCK));
- mtr_sx_lock(dict_index_get_lock(index), mtr);
+ mtr_sx_lock_index(index, mtr);
}
/* Was the record to be updated positioned as the first user
diff --git a/storage/innobase/btr/btr0defragment.cc b/storage/innobase/btr/btr0defragment.cc
index 15e4e6cd70b..fc34a75d11a 100644
--- a/storage/innobase/btr/btr0defragment.cc
+++ b/storage/innobase/btr/btr0defragment.cc
@@ -741,7 +741,7 @@ DECLARE_THREAD(btr_defragment_thread)(void*)
index->set_modified(mtr);
/* To follow the latching order defined in WL#6326, acquire index->lock X-latch.
This entitles us to acquire page latches in any order for the index. */
- mtr_x_lock(&index->lock, &mtr);
+ mtr_x_lock_index(index, &mtr);
/* This will acquire index->lock SX-latch, which per WL#6363 is allowed
when we are already holding the X-latch. */
btr_pcur_restore_position(BTR_MODIFY_TREE, pcur, &mtr);
diff --git a/storage/innobase/btr/btr0scrub.cc b/storage/innobase/btr/btr0scrub.cc
index 63c2271c1e3..9fdb942c1a5 100644
--- a/storage/innobase/btr/btr0scrub.cc
+++ b/storage/innobase/btr/btr0scrub.cc
@@ -738,7 +738,7 @@ btr_scrub_recheck_page(
}
mtr_start(mtr);
- mtr_x_lock(dict_index_get_lock(scrub_data->current_index), mtr);
+ mtr_x_lock_index(scrub_data->current_index, mtr);
/** set savepoint for X-latch of block */
scrub_data->savepoint = mtr_set_savepoint(mtr);
return BTR_SCRUB_PAGE;
diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc
index b67ddd533bf..ceb4b8dd483 100644
--- a/storage/innobase/dict/dict0crea.cc
+++ b/storage/innobase/dict/dict0crea.cc
@@ -1246,9 +1246,8 @@ dict_create_index_step(
if (node->state == INDEX_ADD_TO_CACHE) {
ut_ad(node->index->table == node->table);
- err = dict_index_add_to_cache(
- node->index, FIL_NULL, trx_is_strict(trx),
- node->add_v);
+ err = dict_index_add_to_cache(node->index, FIL_NULL,
+ node->add_v);
ut_ad((node->index == NULL) == (err != DB_SUCCESS));
diff --git a/storage/innobase/dict/dict0defrag_bg.cc b/storage/innobase/dict/dict0defrag_bg.cc
index 7de50f19217..7e61e298ac6 100644
--- a/storage/innobase/dict/dict0defrag_bg.cc
+++ b/storage/innobase/dict/dict0defrag_bg.cc
@@ -279,11 +279,11 @@ dict_stats_save_defrag_stats(
mtr_t mtr;
ulint n_leaf_pages;
ulint n_leaf_reserved;
- mtr_start(&mtr);
- mtr_s_lock(dict_index_get_lock(index), &mtr);
+ mtr.start();
+ mtr_s_lock_index(index, &mtr);
n_leaf_reserved = btr_get_size_and_reserved(index, BTR_N_LEAF_PAGES,
&n_leaf_pages, &mtr);
- mtr_commit(&mtr);
+ mtr.commit();
if (n_leaf_reserved == ULINT_UNDEFINED) {
// The index name is different during fast index creation,
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 7cd4bb4a401..862698d75cc 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -45,11 +45,6 @@ dict_index_t* dict_ind_redundant;
extern uint ibuf_debug;
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
-/**********************************************************************
-Issue a warning that the row is too big. */
-void
-ib_warn_row_too_big(const dict_table_t* table);
-
#include "btr0btr.h"
#include "btr0cur.h"
#include "btr0sea.h"
@@ -1759,194 +1754,17 @@ dict_col_name_is_reserved(
return(FALSE);
}
-bool dict_index_t::rec_potentially_too_big(bool strict) const
-{
- ut_ad(table);
-
- ulint comp;
- ulint i;
- /* maximum possible storage size of a record */
- ulint rec_max_size;
- /* maximum allowed size of a record on a leaf page */
- ulint page_rec_max;
- /* maximum allowed size of a node pointer record */
- ulint page_ptr_max;
-
- /* FTS index consists of auxiliary tables, they shall be excluded from
- index row size check */
- if (type & DICT_FTS) {
- return false;
- }
-
- DBUG_EXECUTE_IF(
- "ib_force_create_table",
- return(FALSE););
-
- comp = dict_table_is_comp(table);
-
- const ulint zip_size = dict_tf_get_zip_size(table->flags);
-
- if (zip_size && zip_size < srv_page_size) {
- /* On a compressed page, two records must fit in the
- uncompressed page modification log. On compressed pages
- with size.physical() == srv_page_size,
- this limit will never be reached. */
- ut_ad(comp);
- /* The maximum allowed record size is the size of
- an empty page, minus a byte for recoding the heap
- number in the page modification log. The maximum
- allowed node pointer size is half that. */
- page_rec_max = page_zip_empty_size(n_fields, zip_size);
- if (page_rec_max) {
- page_rec_max--;
- }
- page_ptr_max = page_rec_max / 2;
- /* On a compressed page, there is a two-byte entry in
- the dense page directory for every record. But there
- is no record header. */
- rec_max_size = 2;
- } else {
- /* The maximum allowed record size is half a B-tree
- page(16k for 64k page size). No additional sparse
- page directory entry will be generated for the first
- few user records. */
- page_rec_max = (comp || srv_page_size < UNIV_PAGE_SIZE_MAX)
- ? page_get_free_space_of_empty(comp) / 2
- : REDUNDANT_REC_MAX_DATA_SIZE;
-
- page_ptr_max = page_rec_max;
- /* Each record has a header. */
- rec_max_size = comp
- ? REC_N_NEW_EXTRA_BYTES
- : REC_N_OLD_EXTRA_BYTES;
- }
-
- if (comp) {
- /* Include the "null" flags in the
- maximum possible record size. */
- rec_max_size += UT_BITS_IN_BYTES(unsigned(n_nullable));
- } else {
- /* For each column, include a 2-byte offset and a
- "null" flag. The 1-byte format is only used in short
- records that do not contain externally stored columns.
- Such records could never exceed the page limit, even
- when using the 2-byte format. */
- rec_max_size += 2 * unsigned(n_fields);
- }
-
- const ulint max_local_len = table->get_overflow_field_local_len();
-
- /* Compute the maximum possible record size. */
- for (i = 0; i < n_fields; i++) {
- const dict_field_t* field
- = dict_index_get_nth_field(this, i);
- const dict_col_t* col
- = dict_field_get_col(field);
-
- /* In dtuple_convert_big_rec(), variable-length columns
- that are longer than BTR_EXTERN_LOCAL_STORED_MAX_SIZE
- may be chosen for external storage.
-
- Fixed-length columns, and all columns of secondary
- index records are always stored inline. */
-
- /* Determine the maximum length of the index field.
- The field_ext_max_size should be computed as the worst
- case in rec_get_converted_size_comp() for
- REC_STATUS_ORDINARY records. */
-
- size_t field_max_size = dict_col_get_fixed_size(col, comp);
- if (field_max_size && field->fixed_len != 0) {
- /* dict_index_add_col() should guarantee this */
- ut_ad(!field->prefix_len
- || field->fixed_len == field->prefix_len);
- /* Fixed lengths are not encoded
- in ROW_FORMAT=COMPACT. */
- goto add_field_size;
- }
-
- field_max_size = dict_col_get_max_size(col);
-
- if (field->prefix_len) {
- if (field->prefix_len < field_max_size) {
- field_max_size = field->prefix_len;
- }
-
- // those conditions were copied from dtuple_convert_big_rec()
- } else if (field_max_size > max_local_len
- && field_max_size > BTR_EXTERN_LOCAL_STORED_MAX_SIZE
- && DATA_BIG_COL(col)
- && dict_index_is_clust(this)) {
-
- /* In the worst case, we have a locally stored
- column of BTR_EXTERN_LOCAL_STORED_MAX_SIZE bytes.
- The length can be stored in one byte. If the
- column were stored externally, the lengths in
- the clustered index page would be
- BTR_EXTERN_FIELD_REF_SIZE and 2. */
- field_max_size = max_local_len;
- }
-
- if (comp) {
- /* Add the extra size for ROW_FORMAT=COMPACT.
- For ROW_FORMAT=REDUNDANT, these bytes were
- added to rec_max_size before this loop. */
- rec_max_size += field_max_size < 256 ? 1 : 2;
- }
-add_field_size:
- rec_max_size += field_max_size;
-
- /* Check the size limit on leaf pages. */
- if (rec_max_size >= page_rec_max) {
- // with 4k page size innodb_index_stats becomes too big
- // this crutch allows server bootstrapping to continue
- if (table->is_system_db) {
- return false;
- }
-
- ib::error_or_warn(strict)
- << "Cannot add field " << field->name
- << " in table " << table->name
- << " because after adding it, the row size is "
- << rec_max_size
- << " which is greater than maximum allowed"
- " size (" << page_rec_max
- << ") for a record on index leaf page.";
-
- return true;
- }
-
- /* Check the size limit on non-leaf pages. Records
- stored in non-leaf B-tree pages consist of the unique
- columns of the record (the key columns of the B-tree)
- and a node pointer field. When we have processed the
- unique columns, rec_max_size equals the size of the
- node pointer record minus the node pointer column. */
- if (i + 1 == dict_index_get_n_unique_in_tree(this)
- && rec_max_size + REC_NODE_PTR_SIZE >= page_ptr_max) {
-
- return true;
- }
- }
-
- return false;
-}
-
/** Adds an index to the dictionary cache, with possible indexing newly
added column.
@param[in,out] index index; NOTE! The index memory
object is freed in this function!
@param[in] page_no root page number of the index
-@param[in] strict true=refuse to create the index
- if records could be too big to fit in
- an B-tree page
@param[in] add_v virtual columns being added along with ADD INDEX
-@return DB_SUCCESS, DB_TOO_BIG_RECORD, or DB_CORRUPTION */
+@return DB_SUCCESS, or DB_CORRUPTION */
dberr_t
dict_index_add_to_cache(
dict_index_t*& index,
ulint page_no,
- bool strict,
const dict_add_v_col_t* add_v)
{
dict_index_t* new_index;
@@ -1995,20 +1813,6 @@ dict_index_add_to_cache(
new_index->disable_ahi = index->disable_ahi;
#endif
- if (new_index->rec_potentially_too_big(strict)) {
-
- if (strict) {
- dict_mem_index_free(new_index);
- dict_mem_index_free(index);
- index = NULL;
- return DB_TOO_BIG_RECORD;
- } else if (current_thd != NULL) {
- /* Avoid the warning to be printed
- during recovery. */
- ib_warn_row_too_big(index->table);
- }
- }
-
n_ord = new_index->n_uniq;
/* Flag the ordering columns and also set column max_prefix */
diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc
index ad2698a1be9..a85b02aa844 100644
--- a/storage/innobase/dict/dict0stats.cc
+++ b/storage/innobase/dict/dict0stats.cc
@@ -850,10 +850,8 @@ dict_stats_update_transient_for_index(
mtr_t mtr;
ulint size;
- mtr_start(&mtr);
-
- mtr_s_lock(dict_index_get_lock(index), &mtr);
-
+ mtr.start();
+ mtr_s_lock_index(index, &mtr);
size = btr_get_size(index, BTR_TOTAL_SIZE, &mtr);
if (size != ULINT_UNDEFINED) {
@@ -863,7 +861,7 @@ dict_stats_update_transient_for_index(
index, BTR_N_LEAF_PAGES, &mtr);
}
- mtr_commit(&mtr);
+ mtr.commit();
switch (size) {
case ULINT_UNDEFINED:
@@ -1928,10 +1926,8 @@ dict_stats_analyze_index(
dict_stats_empty_index(index, false);
- mtr_start(&mtr);
-
- mtr_s_lock(dict_index_get_lock(index), &mtr);
-
+ mtr.start();
+ mtr_s_lock_index(index, &mtr);
size = btr_get_size(index, BTR_TOTAL_SIZE, &mtr);
if (size != ULINT_UNDEFINED) {
@@ -1940,7 +1936,7 @@ dict_stats_analyze_index(
}
/* Release the X locks on the root page taken by btr_get_size() */
- mtr_commit(&mtr);
+ mtr.commit();
switch (size) {
case ULINT_UNDEFINED:
@@ -1953,10 +1949,8 @@ dict_stats_analyze_index(
index->stat_n_leaf_pages = size;
- mtr_start(&mtr);
-
- mtr_sx_lock(dict_index_get_lock(index), &mtr);
-
+ mtr.start();
+ mtr_sx_lock_index(index, &mtr);
root_level = btr_height_get(index, &mtr);
n_uniq = dict_index_get_n_unique(index);
@@ -1996,7 +1990,7 @@ dict_stats_analyze_index(
index->stat_n_sample_sizes[i] = total_pages;
}
- mtr_commit(&mtr);
+ mtr.commit();
dict_stats_assert_initialized_index(index);
DBUG_VOID_RETURN;
@@ -2042,9 +2036,9 @@ dict_stats_analyze_index(
/* Commit the mtr to release the tree S lock to allow
other threads to do some work too. */
- mtr_commit(&mtr);
- mtr_start(&mtr);
- mtr_sx_lock(dict_index_get_lock(index), &mtr);
+ mtr.commit();
+ mtr.start();
+ mtr_sx_lock_index(index, &mtr);
if (root_level != btr_height_get(index, &mtr)) {
/* Just quit if the tree has changed beyond
recognition here. The old stats from previous
@@ -2182,7 +2176,7 @@ found_level:
data, &mtr);
}
- mtr_commit(&mtr);
+ mtr.commit();
UT_DELETE_ARRAY(n_diff_boundaries);
diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc
index 039da3eb2bd..691f86c11cc 100644
--- a/storage/innobase/fsp/fsp0fsp.cc
+++ b/storage/innobase/fsp/fsp0fsp.cc
@@ -542,7 +542,7 @@ void fsp_header_init(fil_space_t* space, ulint size, mtr_t* mtr)
const page_id_t page_id(space->id, 0);
const ulint zip_size = space->zip_size();
- mtr_x_lock(&space->latch, mtr);
+ mtr_x_lock_space(space, mtr);
buf_block_t* block = buf_page_create(page_id, zip_size, mtr);
buf_page_get(page_id, zip_size, RW_SX_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
@@ -1769,7 +1769,7 @@ fseg_create(
ut_ad(byte_offset + FSEG_HEADER_SIZE
<= srv_page_size - FIL_PAGE_DATA_END);
- mtr_x_lock(&space->latch, mtr);
+ mtr_x_lock_space(space, mtr);
ut_d(space->modify_check(*mtr));
if (page != 0) {
@@ -2452,7 +2452,7 @@ fsp_reserve_free_extents(
ut_ad(mtr);
*n_reserved = n_ext;
- mtr_x_lock(&space->latch, mtr);
+ mtr_x_lock_space(space, mtr);
const ulint physical_size = space->physical_size();
space_header = fsp_get_space_header(space, mtr);
@@ -2735,7 +2735,7 @@ fseg_free_page_func(
DBUG_ENTER("fseg_free_page");
fseg_inode_t* seg_inode;
buf_block_t* iblock;
- mtr_x_lock(&space->latch, mtr);
+ mtr_x_lock_space(space, mtr);
DBUG_LOG("fseg_free_page", "space_id: " << space->id
<< ", page_no: " << offset);
@@ -2765,7 +2765,7 @@ fseg_page_is_free(fil_space_t* space, unsigned page)
page);
mtr.start();
- mtr_s_lock(&space->latch, &mtr);
+ mtr_s_lock_space(space, &mtr);
if (page >= space->free_limit || page >= space->size_in_header) {
is_free = true;
diff --git a/storage/innobase/gis/gis0rtree.cc b/storage/innobase/gis/gis0rtree.cc
index 1c1c2dd12f9..db4788b8920 100644
--- a/storage/innobase/gis/gis0rtree.cc
+++ b/storage/innobase/gis/gis0rtree.cc
@@ -1844,7 +1844,7 @@ rtr_estimate_n_rows_in_range(
mtr.start();
index->set_modified(mtr);
- mtr_s_lock(&index->lock, &mtr);
+ mtr_s_lock_index(index, &mtr);
buf_block_t* block = btr_root_block_get(index, RW_S_LATCH, &mtr);
if (!block) {
diff --git a/storage/innobase/gis/gis0sea.cc b/storage/innobase/gis/gis0sea.cc
index 4b20fc8e114..cd46f9f1c2a 100644
--- a/storage/innobase/gis/gis0sea.cc
+++ b/storage/innobase/gis/gis0sea.cc
@@ -137,7 +137,7 @@ rtr_pcur_getnext_from_path(
if (!index_locked) {
ut_ad(latch_mode & BTR_SEARCH_LEAF
|| latch_mode & BTR_MODIFY_LEAF);
- mtr_s_lock(dict_index_get_lock(index), mtr);
+ mtr_s_lock_index(index, mtr);
} else {
ut_ad(mtr_memo_contains_flagged(mtr, &index->lock,
MTR_MEMO_SX_LOCK
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 90d2d904c0f..da317ca23e5 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -5301,7 +5301,7 @@ normalize_table_name_c_low(
create_table_info_t::create_table_info_t(
THD* thd,
- TABLE* form,
+ const TABLE* form,
HA_CREATE_INFO* create_info,
char* table_name,
char* remote_path,
@@ -12342,9 +12342,246 @@ int create_table_info_t::create_table(bool create_fk)
}
}
+ if (!row_size_is_acceptable(*m_table)) {
+ DBUG_RETURN(convert_error_code_to_mysql(
+ DB_TOO_BIG_RECORD, m_flags, NULL));
+ }
+
DBUG_RETURN(0);
}
+bool create_table_info_t::row_size_is_acceptable(
+ const dict_table_t &table) const
+{
+ for (dict_index_t *index= dict_table_get_first_index(&table); index;
+ index= dict_table_get_next_index(index))
+ {
+
+ if (!row_size_is_acceptable(*index))
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* FIXME: row size check has some flaws and should be improved */
+dict_index_t::record_size_info_t dict_index_t::record_size_info() const
+{
+ ut_ad(!(type & DICT_FTS));
+
+ /* maximum allowed size of a node pointer record */
+ ulint page_ptr_max;
+ const bool comp= table->not_redundant();
+ /* table->space == NULL after DISCARD TABLESPACE */
+ const ulint zip_size= dict_tf_get_zip_size(table->flags);
+ record_size_info_t result;
+
+ if (zip_size && zip_size < srv_page_size)
+ {
+ /* On a ROW_FORMAT=COMPRESSED page, two records must fit in the
+ uncompressed page modification log. On compressed pages
+ with size.physical() == univ_page_size.physical(),
+ this limit will never be reached. */
+ ut_ad(comp);
+ /* The maximum allowed record size is the size of
+ an empty page, minus a byte for recoding the heap
+ number in the page modification log. The maximum
+ allowed node pointer size is half that. */
+ result.max_leaf_size= page_zip_empty_size(n_fields, zip_size);
+ if (result.max_leaf_size)
+ {
+ result.max_leaf_size--;
+ }
+ page_ptr_max= result.max_leaf_size / 2;
+ /* On a compressed page, there is a two-byte entry in
+ the dense page directory for every record. But there
+ is no record header. */
+ result.shortest_size= 2;
+ }
+ else
+ {
+ /* The maximum allowed record size is half a B-tree
+ page(16k for 64k page size). No additional sparse
+ page directory entry will be generated for the first
+ few user records. */
+ result.max_leaf_size= (comp || srv_page_size < UNIV_PAGE_SIZE_MAX)
+ ? page_get_free_space_of_empty(comp) / 2
+ : REDUNDANT_REC_MAX_DATA_SIZE;
+
+ page_ptr_max= result.max_leaf_size;
+ /* Each record has a header. */
+ result.shortest_size= comp ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES;
+ }
+
+ if (comp)
+ {
+ /* Include the "null" flags in the
+ maximum possible record size. */
+ result.shortest_size+= UT_BITS_IN_BYTES(n_nullable);
+ }
+ else
+ {
+ /* For each column, include a 2-byte offset and a
+ "null" flag. The 1-byte format is only used in short
+ records that do not contain externally stored columns.
+ Such records could never exceed the page limit, even
+ when using the 2-byte format. */
+ result.shortest_size+= 2 * n_fields;
+ }
+
+ const ulint max_local_len= table->get_overflow_field_local_len();
+
+ /* Compute the maximum possible record size. */
+ for (unsigned i= 0; i < n_fields; i++)
+ {
+ const dict_field_t &f= fields[i];
+ const dict_col_t &col= *f.col;
+
+ /* In dtuple_convert_big_rec(), variable-length columns
+ that are longer than BTR_EXTERN_LOCAL_STORED_MAX_SIZE
+ may be chosen for external storage.
+
+ Fixed-length columns, and all columns of secondary
+ index records are always stored inline. */
+
+ /* Determine the maximum length of the index field.
+ The field_ext_max_size should be computed as the worst
+ case in rec_get_converted_size_comp() for
+ REC_STATUS_ORDINARY records. */
+
+ size_t field_max_size= dict_col_get_fixed_size(&col, comp);
+ if (field_max_size && f.fixed_len != 0)
+ {
+ /* dict_index_add_col() should guarantee this */
+ ut_ad(!f.prefix_len || f.fixed_len == f.prefix_len);
+ /* Fixed lengths are not encoded
+ in ROW_FORMAT=COMPACT. */
+ goto add_field_size;
+ }
+
+ field_max_size= dict_col_get_max_size(&col);
+
+ if (f.prefix_len)
+ {
+ if (f.prefix_len < field_max_size)
+ {
+ field_max_size= f.prefix_len;
+ }
+
+ /* those conditions were copied from dtuple_convert_big_rec()*/
+ }
+ else if (field_max_size > max_local_len &&
+ field_max_size > BTR_EXTERN_LOCAL_STORED_MAX_SIZE &&
+ DATA_BIG_COL(&col) && dict_index_is_clust(this))
+ {
+
+ /* In the worst case, we have a locally stored
+ column of BTR_EXTERN_LOCAL_STORED_MAX_SIZE bytes.
+ The length can be stored in one byte. If the
+ column were stored externally, the lengths in
+ the clustered index page would be
+ BTR_EXTERN_FIELD_REF_SIZE and 2. */
+ field_max_size= max_local_len;
+ }
+
+ if (comp)
+ {
+ /* Add the extra size for ROW_FORMAT=COMPACT.
+ For ROW_FORMAT=REDUNDANT, these bytes were
+ added to result.shortest_size before this loop. */
+ result.shortest_size+= field_max_size < 256 ? 1 : 2;
+ }
+ add_field_size:
+ result.shortest_size+= field_max_size;
+
+ /* Check the size limit on leaf pages. */
+ if (result.shortest_size >= result.max_leaf_size)
+ {
+ result.set_too_big(i);
+ }
+
+ /* Check the size limit on non-leaf pages. Records
+ stored in non-leaf B-tree pages consist of the unique
+ columns of the record (the key columns of the B-tree)
+ and a node pointer field. When we have processed the
+ unique columns, result.shortest_size equals the size of the
+ node pointer record minus the node pointer column. */
+ if (i + 1 == dict_index_get_n_unique_in_tree(this) &&
+ result.shortest_size + REC_NODE_PTR_SIZE >= page_ptr_max)
+ {
+ result.set_too_big(i);
+ }
+ }
+
+ return result;
+}
+
+/** Issue a warning that the row is too big. */
+static void ib_warn_row_too_big(THD *thd, const dict_table_t *table)
+{
+ /* FIXME: this row size check should be improved */
+ /* If prefix is true then a 768-byte prefix is stored
+ locally for BLOB fields. Refer to dict_table_get_format() */
+ const bool prefix= !dict_table_has_atomic_blobs(table);
+
+ const ulint free_space=
+ page_get_free_space_of_empty(table->flags & DICT_TF_COMPACT) / 2;
+
+ push_warning_printf(
+ thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_TO_BIG_ROW,
+ "Row size too large (> " ULINTPF "). Changing some columns to TEXT"
+ " or BLOB %smay help. In current row format, BLOB prefix of"
+ " %d bytes is stored inline.",
+ free_space,
+ prefix ? "or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED " : "",
+ prefix ? DICT_MAX_FIXED_COL_LEN : 0);
+}
+
+bool create_table_info_t::row_size_is_acceptable(
+ const dict_index_t &index) const
+{
+ if ((index.type & DICT_FTS) || index.table->is_system_db)
+ {
+ /* Ignore system tables check because innodb_table_stats
+ maximum row size can not fit on 4k page. */
+ return true;
+ }
+
+ const bool strict= THDVAR(m_thd, strict_mode);
+ dict_index_t::record_size_info_t info= index.record_size_info();
+
+ if (info.row_is_too_big())
+ {
+ ut_ad(info.get_overrun_size() != 0);
+ ut_ad(info.max_leaf_size != 0);
+
+ const size_t idx= info.get_first_overrun_field_index();
+ const dict_field_t *field= dict_index_get_nth_field(&index, idx);
+
+ ut_ad((!field->name) == field->col->is_dropped());
+ ib::error_or_warn eow(strict);
+ if (field->name)
+ eow << "Cannot add field " << field->name << " in table ";
+ else
+ eow << "Cannot add an instantly dropped column in table ";
+ eow << index.table->name << " because after adding it, the row size is "
+ << info.get_overrun_size()
+ << " which is greater than maximum allowed size ("
+ << info.max_leaf_size << " bytes) for a record on index leaf page.";
+
+ if (strict)
+ {
+ return false;
+ }
+
+ ib_warn_row_too_big(m_thd, index.table);
+ }
+
+ return true;
+}
+
/** Update a new table in an InnoDB database.
@return error number */
int
@@ -20865,31 +21102,6 @@ innobase_convert_to_system_charset(
cs2, to, static_cast<uint>(len), errors)));
}
-/**********************************************************************
-Issue a warning that the row is too big. */
-void
-ib_warn_row_too_big(const dict_table_t* table)
-{
- /* If prefix is true then a 768-byte prefix is stored
- locally for BLOB fields. */
- const bool prefix = !dict_table_has_atomic_blobs(table);
-
- const ulint free_space = page_get_free_space_of_empty(
- table->flags & DICT_TF_COMPACT) / 2;
-
- THD* thd = current_thd;
-
- push_warning_printf(
- thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_TO_BIG_ROW,
- "Row size too large (> " ULINTPF ")."
- " Changing some columns to TEXT"
- " or BLOB %smay help. In current row format, BLOB prefix of"
- " %d bytes is stored inline.", free_space
- , prefix ? "or using ROW_FORMAT=DYNAMIC or"
- " ROW_FORMAT=COMPRESSED ": ""
- , prefix ? DICT_MAX_FIXED_COL_LEN : 0);
-}
-
/** Validate the requested buffer pool size. Also, reserve the necessary
memory needed for buffer pool resize.
@param[in] thd thread handle
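
The relocated row-size check above accumulates the worst-case on-page length of every column and compares it against roughly half of an empty leaf page (result.max_leaf_size, about 8126 bytes on the default 16 KiB page). The following is a minimal, self-contained illustration of that accounting, not InnoDB code; the numbers are assumptions, null-flag bytes are omitted, and off-page (externally stored) column handling is ignored.

/* Illustration only: leaf-page accounting in the spirit of
   dict_index_t::record_size_info() above. */
#include <cstdio>
#include <vector>

int main()
{
	/* Assumed per-record leaf-page limit for a 16 KiB page,
	   i.e. roughly page_get_free_space_of_empty()/2. */
	const size_t max_leaf_size = 8126;
	const size_t rec_header = 5;	/* REC_N_NEW_EXTRA_BYTES */
	/* Worst-case inline lengths: ten VARCHAR(255) columns in a
	   4-byte character set, 255 * 4 = 1020 bytes each. */
	std::vector<size_t> col_max_size(10, 1020);

	size_t rec_max_size = rec_header;
	for (size_t i = 0; i < col_max_size.size(); i++) {
		/* 1- or 2-byte length prefix, as in ROW_FORMAT=COMPACT */
		rec_max_size += (col_max_size[i] < 256 ? 1 : 2) + col_max_size[i];
		if (rec_max_size >= max_leaf_size) {
			std::printf("field %zu overruns the limit: %zu >= %zu\n",
				    i, rec_max_size, max_leaf_size);
			return 1;
		}
	}
	std::printf("worst-case record size %zu fits\n", rec_max_size);
	return 0;
}

In the real check, clustered-index columns longer than BTR_EXTERN_LOCAL_STORED_MAX_SIZE are capped at the overflow-field local length, so only the worst-case inline portion of each column counts toward the limit.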
diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h
index 28e1a1e36f1..e00003c30a6 100644
--- a/storage/innobase/handler/ha_innodb.h
+++ b/storage/innobase/handler/ha_innodb.h
@@ -630,7 +630,7 @@ public:
- all but name/path is used, when validating options and using flags. */
create_table_info_t(
THD* thd,
- TABLE* form,
+ const TABLE* form,
HA_CREATE_INFO* create_info,
char* table_name,
char* remote_path,
@@ -678,6 +678,11 @@ public:
void allocate_trx();
+ /** Checks that every index have sane size. Depends on strict mode */
+ bool row_size_is_acceptable(const dict_table_t& table) const;
+ /** Checks that given index have sane size. Depends on strict mode */
+ bool row_size_is_acceptable(const dict_index_t& index) const;
+
/** Determines InnoDB table flags.
If strict_mode=OFF, this will adjust the flags to what should be assumed.
@retval true if successful, false if error */
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index d36796c1f37..3aa1fcc5292 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -6153,6 +6153,10 @@ prepare_inplace_alter_table_dict(
new_clustered = (DICT_CLUSTERED & index_defs[0].ind_type) != 0;
+ create_table_info_t info(ctx->prebuilt->trx->mysql_thd, altered_table,
+ ha_alter_info->create_info, NULL, NULL,
+ srv_file_per_table);
+
/* The primary index would be rebuilt if a FTS Doc ID
column is to be added, and the primary index definition
is just copied from old table and stored in indexdefs[0] */
@@ -6524,7 +6528,7 @@ new_clustered_failed:
for (uint a = 0; a < ctx->num_to_add_index; a++) {
ctx->add_index[a]->table = ctx->new_table;
error = dict_index_add_to_cache(
- ctx->add_index[a], FIL_NULL, false, add_v);
+ ctx->add_index[a], FIL_NULL, add_v);
ut_a(error == DB_SUCCESS);
}
@@ -6752,6 +6756,10 @@ new_table_failed:
}
ctx->add_index[a] = index;
+ if (!info.row_size_is_acceptable(*index)) {
+ error = DB_TOO_BIG_RECORD;
+ goto error_handling;
+ }
index->parser = index_defs[a].parser;
index->has_new_v_col = has_new_v_col;
/* Note the id of the transaction that created this
@@ -6849,6 +6857,10 @@ error_handling_drop_uncached:
DBUG_ASSERT(index != ctx->add_index[a]);
}
ctx->add_index[a]= index;
+ if (!info.row_size_is_acceptable(*index)) {
+ error = DB_TOO_BIG_RECORD;
+ goto error_handling_drop_uncached;
+ }
index->parser = index_defs[a].parser;
index->has_new_v_col = has_new_v_col;
@@ -6897,6 +6909,10 @@ error_handling_drop_uncached:
}
}
}
+ } else if (ctx->is_instant()
+ && !info.row_size_is_acceptable(*user_table)) {
+ error = DB_TOO_BIG_RECORD;
+ goto error_handling;
}
if (ctx->online && ctx->num_to_add_index) {
diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index 844d6901ce5..3c5fedf1ff2 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -347,7 +347,7 @@ ibuf_tree_root_get(
ut_ad(ibuf_inside(mtr));
ut_ad(mutex_own(&ibuf_mutex));
- mtr_sx_lock(dict_index_get_lock(ibuf.index), mtr);
+ mtr_sx_lock_index(ibuf.index, mtr);
/* only segment list access is exclusive each other */
block = buf_page_get(
@@ -425,7 +425,7 @@ ibuf_init_at_db_start(void)
mtr.start();
compile_time_assert(IBUF_SPACE_ID == TRX_SYS_SPACE);
compile_time_assert(IBUF_SPACE_ID == 0);
- mtr_x_lock(&fil_system.sys_space->latch, &mtr);
+ mtr_x_lock_space(fil_system.sys_space, &mtr);
header_page = ibuf_header_page_get(&mtr);
if (!header_page) {
@@ -1910,7 +1910,7 @@ ibuf_add_free_page(void)
mtr_start(&mtr);
/* Acquire the fsp latch before the ibuf header, obeying the latching
order */
- mtr_x_lock(&fil_system.sys_space->latch, &mtr);
+ mtr_x_lock_space(fil_system.sys_space, &mtr);
header_page = ibuf_header_page_get(&mtr);
/* Allocate a new page: NOTE that if the page has been a part of a
@@ -1989,7 +1989,7 @@ ibuf_remove_free_page(void)
/* Acquire the fsp latch before the ibuf header, obeying the latching
order */
- mtr_x_lock(&fil_system.sys_space->latch, &mtr);
+ mtr_x_lock_space(fil_system.sys_space, &mtr);
header_page = ibuf_header_page_get(&mtr);
/* Prevent pessimistic inserts to insert buffer trees for a while */
diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h
index 5647cb488b5..a2666e7cfbc 100644
--- a/storage/innobase/include/dict0dict.h
+++ b/storage/innobase/include/dict0dict.h
@@ -976,16 +976,12 @@ added column.
@param[in,out] index index; NOTE! The index memory
object is freed in this function!
@param[in] page_no root page number of the index
-@param[in] strict true=refuse to create the index
- if records could be too big to fit in
- an B-tree page
@param[in] add_v virtual columns being added along with ADD INDEX
-@return DB_SUCCESS, DB_TOO_BIG_RECORD, or DB_CORRUPTION */
+@return DB_SUCCESS, or DB_CORRUPTION */
dberr_t
dict_index_add_to_cache(
dict_index_t*& index,
ulint page_no,
- bool strict = false,
const dict_add_v_col_t* add_v = NULL)
MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index 336834cda95..e2a42519d99 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -1214,12 +1214,6 @@ struct dict_index_t {
bool
vers_history_row(const rec_t* rec, bool &history_row);
- /** If a record of this index might not fit on a single B-tree page,
- return true.
- @param[in] strict issue error or warning
- @return true if the index record could become too big */
- bool rec_potentially_too_big(bool strict) const;
-
/** Reconstruct the clustered index fields. */
inline void reconstruct_fields();
@@ -1229,6 +1223,66 @@ struct dict_index_t {
@return whether the index contains the column or its prefix */
bool contains_col_or_prefix(ulint n, bool is_virtual) const
MY_ATTRIBUTE((warn_unused_result));
+
+ /** This ad-hoc class is used by record_size_info only. */
+ class record_size_info_t {
+ public:
+ record_size_info_t()
+ : max_leaf_size(0), shortest_size(0), too_big(false),
+ first_overrun_field_index(SIZE_T_MAX), overrun_size(0)
+ {
+ }
+
+ /** Mark row potentially too big for page and set up first
+ overflow field index. */
+ void set_too_big(size_t field_index)
+ {
+ ut_ad(field_index != SIZE_T_MAX);
+
+ too_big = true;
+ if (first_overrun_field_index > field_index) {
+ first_overrun_field_index = field_index;
+ overrun_size = shortest_size;
+ }
+ }
+
+ /** @return overrun field index or SIZE_T_MAX if nothing
+ overflowed*/
+ size_t get_first_overrun_field_index() const
+ {
+ ut_ad(row_is_too_big());
+ ut_ad(first_overrun_field_index != SIZE_T_MAX);
+ return first_overrun_field_index;
+ }
+
+ size_t get_overrun_size() const
+ {
+ ut_ad(row_is_too_big());
+ return overrun_size;
+ }
+
+ bool row_is_too_big() const { return too_big; }
+
+ size_t max_leaf_size; /** Bigger row size this index can
+ produce */
+ size_t shortest_size; /** shortest because it counts everything
+ as in overflow pages */
+
+ private:
+ bool too_big; /** This one is true when maximum row size this
+ index can produce is bigger than maximum row
+ size given page can hold. */
+ size_t first_overrun_field_index; /** After adding this field
+ index row overflowed maximum
+ allowed size. Useful for
+ reporting back to user. */
+ size_t overrun_size; /** Just overrun row size */
+ };
+
+ /** Returns max possibly record size for that index, size of a shortest
+ everything in overflow) size of the longest possible row and index
+ of a field which made index records too big to fit on a page.*/
+ inline record_size_info_t record_size_info() const;
};
/** Detach a column from an index.
diff --git a/storage/innobase/include/mtr0mtr.h b/storage/innobase/include/mtr0mtr.h
index 0dbfc14c68f..f364730b21f 100644
--- a/storage/innobase/include/mtr0mtr.h
+++ b/storage/innobase/include/mtr0mtr.h
@@ -81,17 +81,12 @@ savepoint. */
/** Push an object to an mtr memo stack. */
#define mtr_memo_push(m, o, t) (m)->memo_push(o, t)
-/** Lock an rw-lock in s-mode. */
-#define mtr_s_lock(l, m) (m)->s_lock((l), __FILE__, __LINE__)
-
-/** Lock an rw-lock in x-mode. */
-#define mtr_x_lock(l, m) (m)->x_lock((l), __FILE__, __LINE__)
-
-/** Lock a tablespace in x-mode. */
+#define mtr_s_lock_space(s, m) (m)->s_lock_space((s), __FILE__, __LINE__)
#define mtr_x_lock_space(s, m) (m)->x_lock_space((s), __FILE__, __LINE__)
-/** Lock an rw-lock in sx-mode. */
-#define mtr_sx_lock(l, m) (m)->sx_lock((l), __FILE__, __LINE__)
+#define mtr_s_lock_index(i, m) (m)->s_lock(&(i)->lock, __FILE__, __LINE__)
+#define mtr_x_lock_index(i, m) (m)->x_lock(&(i)->lock, __FILE__, __LINE__)
+#define mtr_sx_lock_index(i, m) (m)->sx_lock(&(i)->lock, __FILE__, __LINE__)
#define mtr_memo_contains_flagged(m, p, l) \
(m)->memo_contains_flagged((p), (l))
@@ -240,29 +235,7 @@ struct mtr_t {
bool is_named_space(const fil_space_t* space) const;
#endif /* UNIV_DEBUG */
- /** Locks a rw-latch in S mode.
- NOTE: use mtr_s_lock().
- @param lock rw-lock
- @param file file name from where called
- @param line line number in file */
- inline void s_lock(rw_lock_t* lock, const char* file, unsigned line);
-
- /** Locks a rw-latch in X mode.
- NOTE: use mtr_x_lock().
- @param lock rw-lock
- @param file file name from where called
- @param line line number in file */
- inline void x_lock(rw_lock_t* lock, const char* file, unsigned line);
-
- /** Locks a rw-latch in X mode.
- NOTE: use mtr_sx_lock().
- @param lock rw-lock
- @param file file name from where called
- @param line line number in file */
- inline void sx_lock(rw_lock_t* lock, const char* file, unsigned line);
-
/** Acquire a tablespace X-latch.
- NOTE: use mtr_x_lock_space().
@param[in] space_id tablespace ID
@param[in] file file name from where called
@param[in] line line number in file
@@ -272,6 +245,60 @@ struct mtr_t {
const char* file,
unsigned line);
+ /** Acquire a shared rw-latch.
+ @param[in] lock rw-latch
+ @param[in] file file name from where called
+ @param[in] line line number in file */
+ void s_lock(rw_lock_t* lock, const char* file, unsigned line)
+ {
+ rw_lock_s_lock_inline(lock, 0, file, line);
+ memo_push(lock, MTR_MEMO_S_LOCK);
+ }
+
+ /** Acquire an exclusive rw-latch.
+ @param[in] lock rw-latch
+ @param[in] file file name from where called
+ @param[in] line line number in file */
+ void x_lock(rw_lock_t* lock, const char* file, unsigned line)
+ {
+ rw_lock_x_lock_inline(lock, 0, file, line);
+ memo_push(lock, MTR_MEMO_X_LOCK);
+ }
+
+ /** Acquire an shared/exclusive rw-latch.
+ @param[in] lock rw-latch
+ @param[in] file file name from where called
+ @param[in] line line number in file */
+ void sx_lock(rw_lock_t* lock, const char* file, unsigned line)
+ {
+ rw_lock_sx_lock_inline(lock, 0, file, line);
+ memo_push(lock, MTR_MEMO_SX_LOCK);
+ }
+
+ /** Acquire a tablespace S-latch.
+ @param[in] space tablespace
+ @param[in] file file name from where called
+ @param[in] line line number in file */
+ void s_lock_space(fil_space_t* space, const char* file, unsigned line)
+ {
+ ut_ad(space->purpose == FIL_TYPE_TEMPORARY
+ || space->purpose == FIL_TYPE_IMPORT
+ || space->purpose == FIL_TYPE_TABLESPACE);
+ s_lock(&space->latch, file, line);
+ }
+
+ /** Acquire a tablespace X-latch.
+ @param[in] space tablespace
+ @param[in] file file name from where called
+ @param[in] line line number in file */
+ void x_lock_space(fil_space_t* space, const char* file, unsigned line)
+ {
+ ut_ad(space->purpose == FIL_TYPE_TEMPORARY
+ || space->purpose == FIL_TYPE_IMPORT
+ || space->purpose == FIL_TYPE_TABLESPACE);
+ x_lock(&space->latch, file, line);
+ }
+
/** Release an object in the memo stack.
@param object object
@param type object type: MTR_MEMO_S_LOCK, ...
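
With the s_lock()/x_lock()/sx_lock() bodies now defined inline in the header above (and removed from mtr0mtr.ic below), the latching pattern itself is unchanged: every latch acquired inside a mini-transaction is pushed onto its memo and released only at commit. A minimal, self-contained sketch of that memo-push idea, using standard C++ locks rather than InnoDB's rw_lock_t, might look like this:

/* Sketch only: a toy mini-transaction memo, not mtr_t. */
#include <shared_mutex>
#include <vector>

enum memo_type { MEMO_S_LOCK, MEMO_X_LOCK };

struct toy_mtr {
	struct slot { std::shared_mutex* lock; memo_type type; };
	std::vector<slot> memo;

	void s_lock(std::shared_mutex* l) {
		l->lock_shared();
		memo.push_back({l, MEMO_S_LOCK});
	}
	void x_lock(std::shared_mutex* l) {
		l->lock();
		memo.push_back({l, MEMO_X_LOCK});
	}
	void commit() {
		/* release in reverse order of acquisition, as the
		   ReleaseLatches/ReleaseAll functors in mtr0mtr.cc do */
		for (auto it = memo.rbegin(); it != memo.rend(); ++it) {
			if (it->type == MEMO_S_LOCK)
				it->lock->unlock_shared();
			else
				it->lock->unlock();
		}
		memo.clear();
	}
};

int main()
{
	std::shared_mutex index_lock;	/* stands in for dict_index_t::lock */
	toy_mtr mtr;
	mtr.x_lock(&index_lock);	/* analogous to mtr_x_lock_index(index, &mtr) */
	/* ... page modifications happen while the latch is held ... */
	mtr.commit();			/* latch released here, not at scope exit */
	return 0;
}

The new mtr_s_lock_index()/mtr_x_lock_index()/mtr_sx_lock_index() macros defined earlier in this header are thin wrappers that pass &index->lock to these members along with the caller's __FILE__ and __LINE__.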
diff --git a/storage/innobase/include/mtr0mtr.ic b/storage/innobase/include/mtr0mtr.ic
index 7f991269d46..0fe56f960b7 100644
--- a/storage/innobase/include/mtr0mtr.ic
+++ b/storage/innobase/include/mtr0mtr.ic
@@ -227,36 +227,3 @@ mtr_t::set_log_mode(mtr_log_t mode)
ut_ad(0);
return(old_mode);
}
-
-/**
-Locks a lock in s-mode. */
-
-void
-mtr_t::s_lock(rw_lock_t* lock, const char* file, unsigned line)
-{
- rw_lock_s_lock_inline(lock, 0, file, line);
-
- memo_push(lock, MTR_MEMO_S_LOCK);
-}
-
-/**
-Locks a lock in x-mode. */
-
-void
-mtr_t::x_lock(rw_lock_t* lock, const char* file, unsigned line)
-{
- rw_lock_x_lock_inline(lock, 0, file, line);
-
- memo_push(lock, MTR_MEMO_X_LOCK);
-}
-
-/**
-Locks a lock in sx-mode. */
-
-void
-mtr_t::sx_lock(rw_lock_t* lock, const char* file, unsigned line)
-{
- rw_lock_sx_lock_inline(lock, 0, file, line);
-
- memo_push(lock, MTR_MEMO_SX_LOCK);
-}
diff --git a/storage/innobase/include/page0zip.ic b/storage/innobase/include/page0zip.ic
index 8df7078594e..23a14b5947c 100644
--- a/storage/innobase/include/page0zip.ic
+++ b/storage/innobase/include/page0zip.ic
@@ -154,6 +154,9 @@ tablespace is not compressed
inline bool page_zip_rec_needs_ext(ulint rec_size, ulint comp, ulint n_fields,
ulint zip_size)
{
+ /* FIXME: row size check is this function seems to be the most correct.
+ Put it in a separate function and use in more places of InnoDB */
+
ut_ad(rec_size
> ulint(comp ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES));
ut_ad(comp || !zip_size);
diff --git a/storage/innobase/mtr/mtr0mtr.cc b/storage/innobase/mtr/mtr0mtr.cc
index dbaae5e72bd..9918d0bbc21 100644
--- a/storage/innobase/mtr/mtr0mtr.cc
+++ b/storage/innobase/mtr/mtr0mtr.cc
@@ -31,8 +31,6 @@ Created 11/26/1995 Heikki Tuuri
#include "fsp0sysspace.h"
#include "page0types.h"
#include "mtr0log.h"
-#include "log0log.h"
-
#include "log0recv.h"
/** Iterate over a memo block in reverse. */
@@ -204,143 +202,84 @@ private:
/** Release latches and decrement the buffer fix count.
@param slot memo slot */
-static
-void
-memo_slot_release(mtr_memo_slot_t* slot)
-{
- switch (slot->type) {
- case MTR_MEMO_BUF_FIX:
- case MTR_MEMO_PAGE_S_FIX:
- case MTR_MEMO_PAGE_SX_FIX:
- case MTR_MEMO_PAGE_X_FIX: {
-
- buf_block_t* block;
-
- block = reinterpret_cast<buf_block_t*>(slot->object);
-
- block->unfix();
- buf_page_release_latch(block, slot->type);
- break;
- }
-
- case MTR_MEMO_S_LOCK:
- rw_lock_s_unlock(reinterpret_cast<rw_lock_t*>(slot->object));
- break;
-
- case MTR_MEMO_SX_LOCK:
- rw_lock_sx_unlock(reinterpret_cast<rw_lock_t*>(slot->object));
- break;
-
- case MTR_MEMO_X_LOCK:
- rw_lock_x_unlock(reinterpret_cast<rw_lock_t*>(slot->object));
- break;
-
-#ifdef UNIV_DEBUG
- default:
- ut_ad(slot->type == MTR_MEMO_MODIFY);
-#endif /* UNIV_DEBUG */
- }
-
- slot->object = NULL;
-}
-
-/** Unfix a page, do not release the latches on the page.
-@param slot memo slot */
-static
-void
-memo_block_unfix(mtr_memo_slot_t* slot)
+static void memo_slot_release(mtr_memo_slot_t *slot)
{
- switch (slot->type) {
- case MTR_MEMO_BUF_FIX:
- case MTR_MEMO_PAGE_S_FIX:
- case MTR_MEMO_PAGE_X_FIX:
- case MTR_MEMO_PAGE_SX_FIX: {
- reinterpret_cast<buf_block_t*>(slot->object)->unfix();
- break;
- }
-
- case MTR_MEMO_S_LOCK:
- case MTR_MEMO_X_LOCK:
- case MTR_MEMO_SX_LOCK:
- break;
-#ifdef UNIV_DEBUG
- default:
-#endif /* UNIV_DEBUG */
- break;
- }
-}
-/** Release latches represented by a slot.
-@param slot memo slot */
-static
-void
-memo_latch_release(mtr_memo_slot_t* slot)
-{
- switch (slot->type) {
- case MTR_MEMO_BUF_FIX:
- case MTR_MEMO_PAGE_S_FIX:
- case MTR_MEMO_PAGE_SX_FIX:
- case MTR_MEMO_PAGE_X_FIX: {
- buf_block_t* block;
-
- block = reinterpret_cast<buf_block_t*>(slot->object);
-
- memo_block_unfix(slot);
-
- buf_page_release_latch(block, slot->type);
-
- slot->object = NULL;
- break;
- }
-
- case MTR_MEMO_S_LOCK:
- rw_lock_s_unlock(reinterpret_cast<rw_lock_t*>(slot->object));
- slot->object = NULL;
- break;
-
- case MTR_MEMO_X_LOCK:
- rw_lock_x_unlock(reinterpret_cast<rw_lock_t*>(slot->object));
- slot->object = NULL;
- break;
-
- case MTR_MEMO_SX_LOCK:
- rw_lock_sx_unlock(reinterpret_cast<rw_lock_t*>(slot->object));
- slot->object = NULL;
- break;
-
+ switch (slot->type) {
#ifdef UNIV_DEBUG
- default:
- ut_ad(slot->type == MTR_MEMO_MODIFY);
-
- slot->object = NULL;
+ default:
+ ut_ad(!"invalid type");
+ break;
+ case MTR_MEMO_MODIFY:
+ break;
#endif /* UNIV_DEBUG */
- }
+ case MTR_MEMO_S_LOCK:
+ rw_lock_s_unlock(reinterpret_cast<rw_lock_t*>(slot->object));
+ break;
+ case MTR_MEMO_SX_LOCK:
+ rw_lock_sx_unlock(reinterpret_cast<rw_lock_t*>(slot->object));
+ break;
+ case MTR_MEMO_X_LOCK:
+ rw_lock_x_unlock(reinterpret_cast<rw_lock_t*>(slot->object));
+ break;
+ case MTR_MEMO_BUF_FIX:
+ case MTR_MEMO_PAGE_S_FIX:
+ case MTR_MEMO_PAGE_SX_FIX:
+ case MTR_MEMO_PAGE_X_FIX:
+ buf_block_t *block= reinterpret_cast<buf_block_t*>(slot->object);
+ block->unfix();
+ buf_page_release_latch(block, slot->type);
+ break;
+ }
+ slot->object= NULL;
}
/** Release the latches acquired by the mini-transaction. */
struct ReleaseLatches {
-
- /** @return true always. */
- bool operator()(mtr_memo_slot_t* slot) const
- {
- if (slot->object != NULL) {
- memo_latch_release(slot);
- }
-
- return(true);
- }
+ /** @return true always. */
+ bool operator()(mtr_memo_slot_t *slot) const
+ {
+ if (!slot->object)
+ return true;
+ switch (slot->type) {
+#ifdef UNIV_DEBUG
+ default:
+ ut_ad(!"invalid type");
+ break;
+ case MTR_MEMO_MODIFY:
+ break;
+#endif /* UNIV_DEBUG */
+ case MTR_MEMO_S_LOCK:
+ rw_lock_s_unlock(reinterpret_cast<rw_lock_t*>(slot->object));
+ break;
+ case MTR_MEMO_X_LOCK:
+ rw_lock_x_unlock(reinterpret_cast<rw_lock_t*>(slot->object));
+ break;
+ case MTR_MEMO_SX_LOCK:
+ rw_lock_sx_unlock(reinterpret_cast<rw_lock_t*>(slot->object));
+ break;
+ case MTR_MEMO_BUF_FIX:
+ case MTR_MEMO_PAGE_S_FIX:
+ case MTR_MEMO_PAGE_SX_FIX:
+ case MTR_MEMO_PAGE_X_FIX:
+ buf_block_t *block= reinterpret_cast<buf_block_t*>(slot->object);
+ block->unfix();
+ buf_page_release_latch(block, slot->type);
+ break;
+ }
+ slot->object= NULL;
+ return true;
+ }
};
/** Release the latches and blocks acquired by the mini-transaction. */
struct ReleaseAll {
- /** @return true always. */
- bool operator()(mtr_memo_slot_t* slot) const
- {
- if (slot->object != NULL) {
- memo_slot_release(slot);
- }
-
- return(true);
- }
+ /** @return true always. */
+ bool operator()(mtr_memo_slot_t *slot) const
+ {
+ if (slot->object)
+ memo_slot_release(slot);
+ return true;
+ }
};
#ifdef UNIV_DEBUG
@@ -349,7 +288,7 @@ struct DebugCheck {
/** @return true always. */
bool operator()(const mtr_memo_slot_t* slot) const
{
- ut_a(slot->object == NULL);
+ ut_ad(!slot->object);
return(true);
}
};
@@ -630,10 +569,7 @@ mtr_t::x_lock_space(ulint space_id, const char* file, unsigned line)
ut_ad(space);
ut_ad(space->id == space_id);
- x_lock(&space->latch, file, line);
- ut_ad(space->purpose == FIL_TYPE_TEMPORARY
- || space->purpose == FIL_TYPE_IMPORT
- || space->purpose == FIL_TYPE_TABLESPACE);
+ x_lock_space(space, file, line);
return(space);
}
diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc
index b4b767df3e4..42bc482ff56 100644
--- a/storage/innobase/row/row0ins.cc
+++ b/storage/innobase/row/row0ins.cc
@@ -2622,7 +2622,7 @@ row_ins_clust_index_entry_low(
if (mode == BTR_MODIFY_LEAF
&& dict_index_is_online_ddl(index)) {
mode = BTR_MODIFY_LEAF_ALREADY_S_LATCHED;
- mtr_s_lock(dict_index_get_lock(index), &mtr);
+ mtr_s_lock_index(index, &mtr);
}
if (unsigned ai = index->table->persistent_autoinc) {
@@ -2847,9 +2847,9 @@ row_ins_sec_mtr_start_and_check_if_aborted(
}
if (search_mode & BTR_ALREADY_S_LATCHED) {
- mtr_s_lock(dict_index_get_lock(index), mtr);
+ mtr_s_lock_index(index, mtr);
} else {
- mtr_sx_lock(dict_index_get_lock(index), mtr);
+ mtr_sx_lock_index(index, mtr);
}
switch (index->online_status) {
@@ -2935,9 +2935,9 @@ row_ins_sec_index_entry_low(
DEBUG_SYNC_C("row_ins_sec_index_enter");
if (mode == BTR_MODIFY_LEAF) {
search_mode |= BTR_ALREADY_S_LATCHED;
- mtr_s_lock(dict_index_get_lock(index), &mtr);
+ mtr_s_lock_index(index, &mtr);
} else {
- mtr_sx_lock(dict_index_get_lock(index), &mtr);
+ mtr_sx_lock_index(index, &mtr);
}
if (row_log_online_op_try(
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index 8573ed137d9..da11fa2f948 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -2557,8 +2557,7 @@ row_create_index_for_mysql(
} else {
dict_build_index_def(table, index, trx);
- err = dict_index_add_to_cache(
- index, FIL_NULL, trx_is_strict(trx));
+ err = dict_index_add_to_cache(index, FIL_NULL);
ut_ad((index == NULL) == (err != DB_SUCCESS));
if (UNIV_LIKELY(err == DB_SUCCESS)) {
ut_ad(!index->is_instant());
diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc
index 8a6bd674126..4dee8de5aad 100644
--- a/storage/innobase/row/row0purge.cc
+++ b/storage/innobase/row/row0purge.cc
@@ -388,14 +388,14 @@ row_purge_remove_sec_if_poss_tree(
enum row_search_result search_result;
log_free_check();
- mtr_start(&mtr);
+ mtr.start();
index->set_modified(mtr);
if (!index->is_committed()) {
/* The index->online_status may change if the index is
or was being created online, but not committed yet. It
is protected by index->lock. */
- mtr_sx_lock(dict_index_get_lock(index), &mtr);
+ mtr_sx_lock_index(index, &mtr);
if (dict_index_is_online_ddl(index)) {
/* Online secondary index creation will not
@@ -490,9 +490,9 @@ row_purge_remove_sec_if_poss_tree(
}
func_exit:
- btr_pcur_close(&pcur);
+ btr_pcur_close(&pcur); // FIXME: need this?
func_exit_no_pcur:
- mtr_commit(&mtr);
+ mtr.commit();
return(success);
}
@@ -519,7 +519,7 @@ row_purge_remove_sec_if_poss_leaf(
log_free_check();
ut_ad(index->table == node->table);
ut_ad(!index->table->is_temporary());
- mtr_start(&mtr);
+ mtr.start();
index->set_modified(mtr);
if (!index->is_committed()) {
@@ -531,7 +531,7 @@ row_purge_remove_sec_if_poss_leaf(
/* The index->online_status may change if the the
index is or was being created online, but not
committed yet. It is protected by index->lock. */
- mtr_s_lock(dict_index_get_lock(index), &mtr);
+ mtr_s_lock_index(index, &mtr);
if (dict_index_is_online_ddl(index)) {
/* Online secondary index creation will not
@@ -635,7 +635,7 @@ row_purge_remove_sec_if_poss_leaf(
->page.id);
btr_pcur_close(&pcur);
- mtr_commit(&mtr);
+ mtr.commit();
return(success);
}
}
@@ -661,9 +661,9 @@ row_purge_remove_sec_if_poss_leaf(
/* The deletion was buffered. */
case ROW_NOT_FOUND:
/* The index entry does not exist, nothing to do. */
- btr_pcur_close(&pcur);
+ btr_pcur_close(&pcur); // FIXME: do we need these? when is btr_cur->rtr_info set?
func_exit_no_pcur:
- mtr_commit(&mtr);
+ mtr.commit();
return(success);
}
@@ -954,12 +954,12 @@ skip_secondaries:
ut_ad(rseg->id == rseg_id);
ut_ad(rseg->is_persistent());
- mtr_start(&mtr);
+ mtr.start();
/* We have to acquire an SX-latch to the clustered
index tree (exclude other tree changes) */
- mtr_sx_lock(dict_index_get_lock(index), &mtr);
+ mtr_sx_lock_index(index, &mtr);
index->set_modified(mtr);
@@ -990,7 +990,7 @@ skip_secondaries:
data_field + dfield_get_len(&ufield->new_val)
- BTR_EXTERN_FIELD_REF_SIZE,
NULL, NULL, NULL, 0, false, &mtr);
- mtr_commit(&mtr);
+ mtr.commit();
}
}
diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc
index becca3600dc..70f89d9115d 100644
--- a/storage/innobase/row/row0uins.cc
+++ b/storage/innobase/row/row0uins.cc
@@ -91,7 +91,7 @@ row_undo_ins_remove_clust_rec(
!= RW_X_LATCH);
ut_ad(node->table->id != DICT_INDEXES_ID);
ut_ad(node->table->id != DICT_COLUMNS_ID);
- mtr_s_lock(dict_index_get_lock(index), &mtr);
+ mtr_s_lock_index(index, &mtr);
}
}
@@ -257,10 +257,10 @@ row_undo_ins_remove_sec_low(
if (modify_leaf) {
mode = BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED;
- mtr_s_lock(dict_index_get_lock(index), &mtr);
+ mtr_s_lock_index(index, &mtr);
} else {
ut_ad(mode == (BTR_MODIFY_TREE | BTR_LATCH_FOR_DELETE));
- mtr_sx_lock(dict_index_get_lock(index), &mtr);
+ mtr_sx_lock_index(index, &mtr);
}
if (row_log_online_op_try(index, entry, 0)) {
diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc
index 9820fc3b06e..4775dc12348 100644
--- a/storage/innobase/row/row0umod.cc
+++ b/storage/innobase/row/row0umod.cc
@@ -237,7 +237,7 @@ static bool row_undo_mod_must_purge(undo_node_t* node, mtr_t* mtr)
btr_cur_t* btr_cur = btr_pcur_get_btr_cur(&node->pcur);
ut_ad(btr_cur->index->is_primary());
- mtr_s_lock(&purge_sys.latch, mtr);
+ mtr->s_lock(&purge_sys.latch, __FILE__, __LINE__);
if (!purge_sys.view.changes_visible(node->new_trx_id,
node->table->name)) {
@@ -288,7 +288,7 @@ row_undo_mod_clust(
online = dict_index_is_online_ddl(index);
if (online) {
ut_ad(node->trx->dict_operation_lock_mode != RW_X_LATCH);
- mtr_s_lock(dict_index_get_lock(index), &mtr);
+ mtr_s_lock_index(index, &mtr);
}
mem_heap_t* heap = mem_heap_create(1024);
@@ -443,7 +443,7 @@ row_undo_mod_clust(
goto mtr_commit_exit;
}
rec_t* rec = btr_pcur_get_rec(pcur);
- mtr_s_lock(&purge_sys.latch, &mtr);
+ mtr.s_lock(&purge_sys.latch, __FILE__, __LINE__);
if (!purge_sys.view.changes_visible(node->new_trx_id,
node->table->name)) {
goto mtr_commit_exit;
@@ -536,10 +536,10 @@ row_undo_mod_del_mark_or_remove_sec_low(
is protected by index->lock. */
if (modify_leaf) {
mode = BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED;
- mtr_s_lock(dict_index_get_lock(index), &mtr);
+ mtr_s_lock_index(index, &mtr);
} else {
ut_ad(mode == (BTR_MODIFY_TREE | BTR_LATCH_FOR_DELETE));
- mtr_sx_lock(dict_index_get_lock(index), &mtr);
+ mtr_sx_lock_index(index, &mtr);
}
if (row_log_online_op_try(index, entry, 0)) {
@@ -732,10 +732,10 @@ try_again:
is protected by index->lock. */
if (mode == BTR_MODIFY_LEAF) {
mode = BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED;
- mtr_s_lock(dict_index_get_lock(index), &mtr);
+ mtr_s_lock_index(index, &mtr);
} else {
ut_ad(mode == BTR_MODIFY_TREE);
- mtr_sx_lock(dict_index_get_lock(index), &mtr);
+ mtr_sx_lock_index(index, &mtr);
}
if (row_log_online_op_try(index, entry, trx->id)) {
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index a3279a3c87b..bc072ccffc9 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -2321,7 +2321,7 @@ row_upd_sec_index_entry(
or was being created online, but not committed yet. It
is protected by index->lock. */
- mtr_s_lock(dict_index_get_lock(index), &mtr);
+ mtr_s_lock_index(index, &mtr);
switch (dict_index_get_online_status(index)) {
case ONLINE_INDEX_COMPLETE:
@@ -3100,7 +3100,7 @@ row_upd_clust_step(
if (dict_index_is_online_ddl(index)) {
ut_ad(node->table->id != DICT_INDEXES_ID);
mode = BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED;
- mtr_s_lock(dict_index_get_lock(index), &mtr);
+ mtr_s_lock_index(index, &mtr);
} else {
mode = BTR_MODIFY_LEAF;
}
diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc
index 8dd17ce4509..356999b17b2 100644
--- a/storage/innobase/trx/trx0purge.cc
+++ b/storage/innobase/trx/trx0purge.cc
@@ -697,7 +697,7 @@ not_free:
mtr_t mtr;
const ulint size = SRV_UNDO_TABLESPACE_SIZE_IN_PAGES;
mtr.start();
- mtr_x_lock(&purge_sys.truncate.current->latch, &mtr);
+ mtr_x_lock_space(purge_sys.truncate.current, &mtr);
fil_truncate_log(purge_sys.truncate.current, size, &mtr);
fsp_header_init(purge_sys.truncate.current, size, &mtr);
mutex_enter(&fil_system.mutex);
diff --git a/storage/innobase/trx/trx0rseg.cc b/storage/innobase/trx/trx0rseg.cc
index ef4732a98af..29e6acc773c 100644
--- a/storage/innobase/trx/trx0rseg.cc
+++ b/storage/innobase/trx/trx0rseg.cc
@@ -696,7 +696,7 @@ trx_temp_rseg_create()
for (ulong i = 0; i < TRX_SYS_N_RSEGS; i++) {
mtr.start();
mtr.set_log_mode(MTR_LOG_NO_REDO);
- mtr_x_lock(&fil_system.temp_space->latch, &mtr);
+ mtr_x_lock_space(fil_system.temp_space, &mtr);
buf_block_t* rblock = trx_rseg_header_create(
fil_system.temp_space, i, NULL, &mtr);
diff --git a/storage/innobase/trx/trx0sys.cc b/storage/innobase/trx/trx0sys.cc
index f41bf942b2b..4c0ca852aef 100644
--- a/storage/innobase/trx/trx0sys.cc
+++ b/storage/innobase/trx/trx0sys.cc
@@ -156,7 +156,7 @@ trx_sysf_create(
then enter the kernel: we must do it in this order to conform
to the latching order rules. */
- mtr_x_lock(&fil_system.sys_space->latch, mtr);
+ mtr_x_lock_space(fil_system.sys_space, mtr);
compile_time_assert(TRX_SYS_SPACE == 0);
/* Create the trx sys file block in a new allocated file segment */