author    | Sergei Golubchik <sergii@pisem.net> | 2012-01-13 15:50:02 +0100
committer | Sergei Golubchik <sergii@pisem.net> | 2012-01-13 15:50:02 +0100
commit    | 4f435bddfd44d40999f88685c61cc04e319d8d6c (patch)
tree      | f9d0655a0d901b87f918a736741144b502cba3f6 /storage/innobase
parent    | 8c2bcdf85ff753bceeb5b235f3605e348e6f9e1d (diff)
parent    | 6ca4ca7d37fed3b3da18666768de6a2f8c34bc7b (diff)
download  | mariadb-git-4f435bddfd44d40999f88685c61cc04e319d8d6c.tar.gz
5.3 merge
Diffstat (limited to 'storage/innobase')
24 files changed, 366 insertions, 513 deletions
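The largest part of this merge reworks how InnoDB tracks which buffer-pool blocks are covered by the adaptive hash index: the separate block->is_hashed flag is removed and block->index != NULL becomes the sole indicator, btr_search_enabled_mutex and btr_search_fully_disabled are dropped (btr_search_enabled is now protected by btr_search_latch alone), and buf_pool_drop_hash_index() is replaced by buf_pool_clear_hash_index(), which simply detaches every block while btr_search_latch is held exclusively. The sketch below only illustrates that pattern under simplified, hypothetical types (struct buf_block here is not the real buf_block_struct); it is not the merged code itself.

/* Minimal illustrative sketch, NOT the merged InnoDB code: simplified,
   hypothetical types showing the is_hashed -> (index != NULL) pattern. */
#include <stddef.h>

struct dict_index;                      /* opaque stand-in */

struct buf_block {
        struct dict_index*      index;          /* NULL = not hashed; replaces
                                                   the old is_hashed flag */
        unsigned                n_pointers;     /* debug-only AHI entry count */
};

/* Old code tested block->is_hashed; after the merge the test becomes: */
static int
block_is_hashed(const struct buf_block* block)
{
        return(block->index != NULL);
}

/* Rough shape of the new buf_pool_clear_hash_index(): the caller holds
   btr_search_latch in exclusive mode with btr_search_enabled == FALSE,
   so detaching every block is just clearing the pointer; the hash table
   itself is emptied afterwards by the caller (btr_search_disable()). */
static void
clear_hash_index(struct buf_block* blocks, size_t n_blocks)
{
        size_t  i;

        for (i = 0; i < n_blocks; i++) {
                blocks[i].index = NULL;         /* detach from the AHI */
                blocks[i].n_pointers = 0;       /* reset debug counter */
        }
}

Folding the flag into the pointer lets btr_search_disable() avoid the old per-block latch dance (and its livelock risk), because assigning block->index = NULL is permitted whenever btr_search_latch is held exclusively, as the updated comment in buf0buf.h below spells out.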
diff --git a/storage/innobase/btr/btr0cur.c b/storage/innobase/btr/btr0cur.c index f63ca20ef24..625721133fd 100644 --- a/storage/innobase/btr/btr0cur.c +++ b/storage/innobase/btr/btr0cur.c @@ -1895,7 +1895,7 @@ btr_cur_update_in_place( was_delete_marked = rec_get_deleted_flag( rec, page_is_comp(buf_block_get_frame(block))); - is_hashed = block->is_hashed; + is_hashed = (block->index != NULL); if (is_hashed) { /* TO DO: Can we skip this if none of the fields @@ -3542,16 +3542,11 @@ btr_record_not_null_field_in_rec( } for (i = 0; i < n_unique; i++) { - ulint rec_len; - - rec_get_nth_field_offs(offsets, i, &rec_len); - - if (rec_len != UNIV_SQL_NULL) { - n_not_null[i]++; - } else { - /* Break if we hit the first NULL value */ + if (rec_offs_nth_sql_null(offsets, i)) { break; } + + n_not_null[i]++; } } @@ -3694,8 +3689,7 @@ btr_estimate_number_of_different_key_vals( if (n_not_null) { btr_record_not_null_field_in_rec( - n_cols, offsets_next_rec, - n_not_null); + n_cols, offsets_next_rec, n_not_null); } total_external_size diff --git a/storage/innobase/btr/btr0pcur.c b/storage/innobase/btr/btr0pcur.c index cbb0d21a7ed..57d9752649f 100644 --- a/storage/innobase/btr/btr0pcur.c +++ b/storage/innobase/btr/btr0pcur.c @@ -247,6 +247,8 @@ btr_pcur_restore_position_func( cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE, index, latch_mode, btr_pcur_get_btr_cur(cursor), mtr); + cursor->latch_mode = latch_mode; + cursor->pos_state = BTR_PCUR_IS_POSITIONED; cursor->block_when_stored = btr_pcur_get_block(cursor); return(FALSE); diff --git a/storage/innobase/btr/btr0sea.c b/storage/innobase/btr/btr0sea.c index 7070420425c..ddc94064da9 100644 --- a/storage/innobase/btr/btr0sea.c +++ b/storage/innobase/btr/btr0sea.c @@ -44,12 +44,8 @@ Created 2/17/1996 Heikki Tuuri #include "ha0ha.h" /** Flag: has the search system been enabled? -Protected by btr_search_latch and btr_search_enabled_mutex. */ +Protected by btr_search_latch. */ UNIV_INTERN char btr_search_enabled = TRUE; -UNIV_INTERN ibool btr_search_fully_disabled = FALSE; - -/** Mutex protecting btr_search_enabled */ -static mutex_t btr_search_enabled_mutex; #ifdef UNIV_PFS_MUTEX /* Key to register btr_search_enabled_mutex with performance schema */ @@ -180,8 +176,6 @@ btr_search_sys_create( rw_lock_create(btr_search_latch_key, &btr_search_latch, SYNC_SEARCH_SYS); - mutex_create(btr_search_enabled_mutex_key, - &btr_search_enabled_mutex, SYNC_SEARCH_SYS_CONF); btr_search_sys = mem_alloc(sizeof(btr_search_sys_t)); @@ -211,27 +205,37 @@ void btr_search_disable(void) /*====================*/ { - mutex_enter(&btr_search_enabled_mutex); + dict_table_t* table; + + mutex_enter(&dict_sys->mutex); rw_lock_x_lock(&btr_search_latch); - /* Disable access to hash index, also tell ha_insert_for_fold() - stop adding new nodes to hash index, but still allow updating - existing nodes */ btr_search_enabled = FALSE; - /* Clear all block->is_hashed flags and remove all entries - from btr_search_sys->hash_index. */ - buf_pool_drop_hash_index(); + /* Clear the index->search_info->ref_count of every index in + the data dictionary cache. 
*/ + for (table = UT_LIST_GET_FIRST(dict_sys->table_LRU); table; + table = UT_LIST_GET_NEXT(table_LRU, table)) { + + dict_index_t* index; - /* hash index has been cleaned up, disallow any operation to - the hash index */ - btr_search_fully_disabled = TRUE; + for (index = dict_table_get_first_index(table); index; + index = dict_table_get_next_index(index)) { + + index->search_info->ref_count = 0; + } + } - /* btr_search_enabled_mutex should guarantee this. */ - ut_ad(!btr_search_enabled); + mutex_exit(&dict_sys->mutex); + + /* Set all block->index = NULL. */ + buf_pool_clear_hash_index(); + + /* Clear the adaptive hash index. */ + hash_table_clear(btr_search_sys->hash_index); + mem_heap_empty(btr_search_sys->hash_index->heap); rw_lock_x_unlock(&btr_search_latch); - mutex_exit(&btr_search_enabled_mutex); } /********************************************************************//** @@ -241,14 +245,11 @@ void btr_search_enable(void) /*====================*/ { - mutex_enter(&btr_search_enabled_mutex); rw_lock_x_lock(&btr_search_latch); btr_search_enabled = TRUE; - btr_search_fully_disabled = FALSE; rw_lock_x_unlock(&btr_search_latch); - mutex_exit(&btr_search_enabled_mutex); } /*****************************************************************//** @@ -471,7 +472,7 @@ btr_search_update_block_hash_info( && (block->n_bytes == info->n_bytes) && (block->left_side == info->left_side)) { - if ((block->is_hashed) + if ((block->index) && (block->curr_n_fields == info->n_fields) && (block->curr_n_bytes == info->n_bytes) && (block->curr_left_side == info->left_side)) { @@ -500,7 +501,7 @@ btr_search_update_block_hash_info( / BTR_SEARCH_PAGE_BUILD_LIMIT) && (info->n_hash_potential >= BTR_SEARCH_BUILD_LIMIT)) { - if ((!block->is_hashed) + if ((!block->index) || (block->n_hash_helps > 2 * page_get_n_recs(block->frame)) || (block->n_fields != block->curr_n_fields) @@ -532,9 +533,9 @@ btr_search_update_hash_ref( buf_block_t* block, /*!< in: buffer block where cursor positioned */ btr_cur_t* cursor) /*!< in: cursor */ { + dict_index_t* index; ulint fold; - rec_t* rec; - index_id_t index_id; + const rec_t* rec; ut_ad(cursor->flag == BTR_CUR_HASH_FAIL); #ifdef UNIV_SYNC_DEBUG @@ -545,13 +546,15 @@ btr_search_update_hash_ref( ut_ad(page_align(btr_cur_get_rec(cursor)) == buf_block_get_frame(block)); - if (!block->is_hashed) { + index = block->index; + + if (!index) { return; } - ut_a(block->index == cursor->index); - ut_a(!dict_index_is_ibuf(cursor->index)); + ut_a(index == cursor->index); + ut_a(!dict_index_is_ibuf(index)); if ((info->n_hash_potential > 0) && (block->curr_n_fields == info->n_fields) @@ -568,12 +571,11 @@ btr_search_update_hash_ref( return; } - index_id = cursor->index->id; fold = rec_fold(rec, - rec_get_offsets(rec, cursor->index, offsets_, + rec_get_offsets(rec, index, offsets_, ULINT_UNDEFINED, &heap), block->curr_n_fields, - block->curr_n_bytes, index_id); + block->curr_n_bytes, index->id); if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } @@ -837,7 +839,7 @@ btr_search_guess_on_hash( { buf_pool_t* buf_pool; buf_block_t* block; - rec_t* rec; + const rec_t* rec; ulint fold; index_id_t index_id; #ifdef notdefined @@ -923,7 +925,7 @@ btr_search_guess_on_hash( ut_ad(page_rec_is_user_rec(rec)); - btr_cur_position(index, rec, block, cursor); + btr_cur_position(index, (rec_t*) rec, block, cursor); /* Check the validity of the guess within the page */ @@ -1053,15 +1055,16 @@ btr_search_drop_page_hash_index( retry: rw_lock_s_lock(&btr_search_latch); - page = block->frame; + index = block->index; - if 
(UNIV_LIKELY(!block->is_hashed)) { + if (UNIV_LIKELY(!index)) { rw_lock_s_unlock(&btr_search_latch); return; } + ut_a(!dict_index_is_ibuf(index)); table = btr_search_sys->hash_index; #ifdef UNIV_SYNC_DEBUG @@ -1072,8 +1075,6 @@ retry: n_fields = block->curr_n_fields; n_bytes = block->curr_n_bytes; - index = block->index; - ut_a(!dict_index_is_ibuf(index)); /* NOTE: The fields of block must not be accessed after releasing btr_search_latch, as the index page might only @@ -1083,6 +1084,7 @@ retry: ut_a(n_fields + n_bytes > 0); + page = block->frame; n_recs = page_get_n_recs(page); /* Calculate and cache fold values into an array for fast deletion @@ -1131,7 +1133,7 @@ next_rec: rw_lock_x_lock(&btr_search_latch); - if (UNIV_UNLIKELY(!block->is_hashed)) { + if (UNIV_UNLIKELY(!block->index)) { /* Someone else has meanwhile dropped the hash index */ goto cleanup; @@ -1159,9 +1161,8 @@ next_rec: ut_a(index->search_info->ref_count > 0); index->search_info->ref_count--; - block->is_hashed = FALSE; block->index = NULL; - + cleanup: #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG if (UNIV_UNLIKELY(block->n_pointers)) { @@ -1187,8 +1188,8 @@ cleanup: } /********************************************************************//** -Drops a page hash index when a page is freed from a fseg to the file system. -Drops possible hash index if the page happens to be in the buffer pool. */ +Drops a possible page hash index when a page is evicted from the buffer pool +or freed in a file segment. */ UNIV_INTERN void btr_search_drop_page_hash_when_freed( @@ -1201,28 +1202,19 @@ btr_search_drop_page_hash_when_freed( buf_block_t* block; mtr_t mtr; - if (!buf_page_peek_if_search_hashed(space, page_no)) { - - return; - } - mtr_start(&mtr); - /* We assume that if the caller has a latch on the page, then the - caller has already dropped the hash index for the page, and we never - get here. Therefore we can acquire the s-latch to the page without - having to fear a deadlock. */ + /* If the caller has a latch on the page, then the caller must + have a x-latch on the page and it must have already dropped + the hash index for the page. Because of the x-latch that we + are possibly holding, we cannot s-latch the page, but must + (recursively) x-latch it, even though we are only reading. */ - block = buf_page_get_gen(space, zip_size, page_no, RW_S_LATCH, NULL, + block = buf_page_get_gen(space, zip_size, page_no, RW_X_LATCH, NULL, BUF_PEEK_IF_IN_POOL, __FILE__, __LINE__, &mtr); - /* Because the buffer pool mutex was released by - buf_page_peek_if_search_hashed(), it is possible that the - block was removed from the buffer pool by another thread - before buf_page_get_gen() got a chance to acquire the buffer - pool mutex again. Thus, we must check for a NULL return. 
*/ - if (UNIV_LIKELY(block != NULL)) { + if (block && block->index) { buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH); @@ -1254,7 +1246,6 @@ btr_search_build_page_hash_index( rec_t* next_rec; ulint fold; ulint next_fold; - index_id_t index_id; ulint n_cached; ulint n_recs; ulint* folds; @@ -1268,9 +1259,6 @@ btr_search_build_page_hash_index( ut_ad(index); ut_a(!dict_index_is_ibuf(index)); - table = btr_search_sys->hash_index; - page = buf_block_get_frame(block); - #ifdef UNIV_SYNC_DEBUG ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX)); ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED) @@ -1279,9 +1267,17 @@ btr_search_build_page_hash_index( rw_lock_s_lock(&btr_search_latch); - if (block->is_hashed && ((block->curr_n_fields != n_fields) - || (block->curr_n_bytes != n_bytes) - || (block->curr_left_side != left_side))) { + if (!btr_search_enabled) { + rw_lock_s_unlock(&btr_search_latch); + return; + } + + table = btr_search_sys->hash_index; + page = buf_block_get_frame(block); + + if (block->index && ((block->curr_n_fields != n_fields) + || (block->curr_n_bytes != n_bytes) + || (block->curr_left_side != left_side))) { rw_lock_s_unlock(&btr_search_latch); @@ -1318,7 +1314,7 @@ btr_search_build_page_hash_index( n_cached = 0; - index_id = btr_page_get_index_id(page); + ut_a(index->id == btr_page_get_index_id(page)); rec = page_rec_get_next(page_get_infimum_rec(page)); @@ -1333,7 +1329,7 @@ btr_search_build_page_hash_index( } } - fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id); + fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id); if (left_side) { @@ -1360,7 +1356,7 @@ btr_search_build_page_hash_index( offsets = rec_get_offsets(next_rec, index, offsets, n_fields + (n_bytes > 0), &heap); next_fold = rec_fold(next_rec, offsets, n_fields, - n_bytes, index_id); + n_bytes, index->id); if (fold != next_fold) { /* Insert an entry into the hash index */ @@ -1385,13 +1381,13 @@ btr_search_build_page_hash_index( rw_lock_x_lock(&btr_search_latch); - if (UNIV_UNLIKELY(btr_search_fully_disabled)) { + if (UNIV_UNLIKELY(!btr_search_enabled)) { goto exit_func; } - if (block->is_hashed && ((block->curr_n_fields != n_fields) - || (block->curr_n_bytes != n_bytes) - || (block->curr_left_side != left_side))) { + if (block->index && ((block->curr_n_fields != n_fields) + || (block->curr_n_bytes != n_bytes) + || (block->curr_left_side != left_side))) { goto exit_func; } @@ -1400,11 +1396,10 @@ btr_search_build_page_hash_index( rebuild hash index for a page that is already hashed, we have to take care not to increment the counter in that case. 
*/ - if (!block->is_hashed) { + if (!block->index) { index->search_info->ref_count++; } - block->is_hashed = TRUE; block->n_hash_helps = 0; block->curr_n_fields = n_fields; @@ -1452,14 +1447,15 @@ btr_search_move_or_delete_hash_entries( ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX)); ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_EX)); #endif /* UNIV_SYNC_DEBUG */ - ut_a(!new_block->is_hashed || new_block->index == index); - ut_a(!block->is_hashed || block->index == index); - ut_a(!(new_block->is_hashed || block->is_hashed) - || !dict_index_is_ibuf(index)); rw_lock_s_lock(&btr_search_latch); - if (new_block->is_hashed) { + ut_a(!new_block->index || new_block->index == index); + ut_a(!block->index || block->index == index); + ut_a(!(new_block->index || block->index) + || !dict_index_is_ibuf(index)); + + if (new_block->index) { rw_lock_s_unlock(&btr_search_latch); @@ -1468,7 +1464,7 @@ btr_search_move_or_delete_hash_entries( return; } - if (block->is_hashed) { + if (block->index) { n_fields = block->curr_n_fields; n_bytes = block->curr_n_bytes; @@ -1505,42 +1501,48 @@ btr_search_update_hash_on_delete( { hash_table_t* table; buf_block_t* block; - rec_t* rec; + const rec_t* rec; ulint fold; - index_id_t index_id; + dict_index_t* index; ulint offsets_[REC_OFFS_NORMAL_SIZE]; mem_heap_t* heap = NULL; rec_offs_init(offsets_); - rec = btr_cur_get_rec(cursor); - block = btr_cur_get_block(cursor); #ifdef UNIV_SYNC_DEBUG ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX)); #endif /* UNIV_SYNC_DEBUG */ - if (!block->is_hashed) { + index = block->index; + + if (!index) { return; } - ut_a(block->index == cursor->index); + ut_a(index == cursor->index); ut_a(block->curr_n_fields + block->curr_n_bytes > 0); - ut_a(!dict_index_is_ibuf(cursor->index)); + ut_a(!dict_index_is_ibuf(index)); table = btr_search_sys->hash_index; - index_id = cursor->index->id; - fold = rec_fold(rec, rec_get_offsets(rec, cursor->index, offsets_, + rec = btr_cur_get_rec(cursor); + + fold = rec_fold(rec, rec_get_offsets(rec, index, offsets_, ULINT_UNDEFINED, &heap), - block->curr_n_fields, block->curr_n_bytes, index_id); + block->curr_n_fields, block->curr_n_bytes, index->id); if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } + rw_lock_x_lock(&btr_search_latch); - ha_search_and_delete_if_found(table, fold, rec); + if (block->index) { + ut_a(block->index == index); + + ha_search_and_delete_if_found(table, fold, rec); + } rw_lock_x_unlock(&btr_search_latch); } @@ -1558,6 +1560,7 @@ btr_search_update_hash_node_on_insert( { hash_table_t* table; buf_block_t* block; + dict_index_t* index; rec_t* rec; rec = btr_cur_get_rec(cursor); @@ -1568,16 +1571,25 @@ btr_search_update_hash_node_on_insert( ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX)); #endif /* UNIV_SYNC_DEBUG */ - if (!block->is_hashed) { + index = block->index; + + if (!index) { return; } - ut_a(block->index == cursor->index); - ut_a(!dict_index_is_ibuf(cursor->index)); + ut_a(cursor->index == index); + ut_a(!dict_index_is_ibuf(index)); rw_lock_x_lock(&btr_search_latch); + if (!block->index) { + + goto func_exit; + } + + ut_a(block->index == index); + if ((cursor->flag == BTR_CUR_HASH) && (cursor->n_fields == block->curr_n_fields) && (cursor->n_bytes == block->curr_n_bytes) @@ -1588,6 +1600,7 @@ btr_search_update_hash_node_on_insert( ha_search_and_update_if_found(table, cursor->fold, rec, block, page_rec_get_next(rec)); +func_exit: rw_lock_x_unlock(&btr_search_latch); } else { rw_lock_x_unlock(&btr_search_latch); @@ -1609,10 +1622,10 @@ btr_search_update_hash_on_insert( { hash_table_t* 
table; buf_block_t* block; + dict_index_t* index; rec_t* rec; rec_t* ins_rec; rec_t* next_rec; - index_id_t index_id; ulint fold; ulint ins_fold; ulint next_fold = 0; /* remove warning (??? bug ???) */ @@ -1637,15 +1650,15 @@ btr_search_update_hash_on_insert( ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX)); #endif /* UNIV_SYNC_DEBUG */ - if (!block->is_hashed) { + index = block->index; + + if (!index) { return; } - ut_a(block->index == cursor->index); - ut_a(!dict_index_is_ibuf(cursor->index)); - - index_id = cursor->index->id; + ut_a(index == cursor->index); + ut_a(!dict_index_is_ibuf(index)); n_fields = block->curr_n_fields; n_bytes = block->curr_n_bytes; @@ -1654,21 +1667,21 @@ btr_search_update_hash_on_insert( ins_rec = page_rec_get_next(rec); next_rec = page_rec_get_next(ins_rec); - offsets = rec_get_offsets(ins_rec, cursor->index, offsets, + offsets = rec_get_offsets(ins_rec, index, offsets, ULINT_UNDEFINED, &heap); - ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index_id); + ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index->id); if (!page_rec_is_supremum(next_rec)) { - offsets = rec_get_offsets(next_rec, cursor->index, offsets, + offsets = rec_get_offsets(next_rec, index, offsets, n_fields + (n_bytes > 0), &heap); next_fold = rec_fold(next_rec, offsets, n_fields, - n_bytes, index_id); + n_bytes, index->id); } if (!page_rec_is_infimum(rec)) { - offsets = rec_get_offsets(rec, cursor->index, offsets, + offsets = rec_get_offsets(rec, index, offsets, n_fields + (n_bytes > 0), &heap); - fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id); + fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id); } else { if (left_side) { @@ -1676,6 +1689,10 @@ btr_search_update_hash_on_insert( locked = TRUE; + if (!btr_search_enabled) { + goto function_exit; + } + ha_insert_for_fold(table, ins_fold, block, ins_rec); } @@ -1689,6 +1706,10 @@ btr_search_update_hash_on_insert( rw_lock_x_lock(&btr_search_latch); locked = TRUE; + + if (!btr_search_enabled) { + goto function_exit; + } } if (!left_side) { @@ -1707,6 +1728,10 @@ check_next_rec: rw_lock_x_lock(&btr_search_latch); locked = TRUE; + + if (!btr_search_enabled) { + goto function_exit; + } } ha_insert_for_fold(table, ins_fold, block, ins_rec); @@ -1722,6 +1747,10 @@ check_next_rec: rw_lock_x_lock(&btr_search_latch); locked = TRUE; + + if (!btr_search_enabled) { + goto function_exit; + } } if (!left_side) { @@ -1729,7 +1758,7 @@ check_next_rec: ha_insert_for_fold(table, ins_fold, block, ins_rec); /* fputs("Hash insert for ", stderr); - dict_index_name_print(stderr, cursor->index); + dict_index_name_print(stderr, index); fprintf(stderr, " fold %lu\n", ins_fold); */ } else { @@ -1832,21 +1861,20 @@ btr_search_validate(void) ut_a(!dict_index_is_ibuf(block->index)); - offsets = rec_get_offsets((const rec_t*) node->data, + page_index_id = btr_page_get_index_id(block->frame); + + offsets = rec_get_offsets(node->data, block->index, offsets, block->curr_n_fields + (block->curr_n_bytes > 0), &heap); - page_index_id = btr_page_get_index_id(block->frame); - - if (UNIV_UNLIKELY - (!block->is_hashed || node->fold - != rec_fold((rec_t*)(node->data), - offsets, - block->curr_n_fields, - block->curr_n_bytes, - page_index_id))) { + if (!block->index || node->fold + != rec_fold(node->data, + offsets, + block->curr_n_fields, + block->curr_n_bytes, + page_index_id)) { const page_t* page = block->frame; ok = FALSE; @@ -1862,20 +1890,19 @@ btr_search_validate(void) node->data, (ullint) page_index_id, (ulong) node->fold, - (ulong) 
rec_fold((rec_t*)(node->data), + (ulong) rec_fold(node->data, offsets, block->curr_n_fields, block->curr_n_bytes, page_index_id)); fputs("InnoDB: Record ", stderr); - rec_print_new(stderr, (rec_t*)node->data, - offsets); + rec_print_new(stderr, node->data, offsets); fprintf(stderr, "\nInnoDB: on that page." - " Page mem address %p, is hashed %lu," + " Page mem address %p, is hashed %p," " n fields %lu, n bytes %lu\n" "InnoDB: side %lu\n", - (void*) page, (ulong) block->is_hashed, + (void*) page, (void*) block->index, (ulong) block->curr_n_fields, (ulong) block->curr_n_bytes, (ulong) block->curr_left_side); diff --git a/storage/innobase/buf/buf0buf.c b/storage/innobase/buf/buf0buf.c index 33b4cd40215..b5004a0834c 100644 --- a/storage/innobase/buf/buf0buf.c +++ b/storage/innobase/buf/buf0buf.c @@ -839,6 +839,16 @@ pfs_register_buffer_block( rwlock->pfs_psi = (PSI_server) ? PSI_server->init_rwlock(buf_block_lock_key, rwlock) : NULL; + +# ifdef UNIV_SYNC_DEBUG + rwlock = &block->debug_latch; + ut_a(!rwlock->pfs_psi); + rwlock->pfs_psi = (PSI_server) + ? PSI_server->init_rwlock(buf_block_debug_latch_key, + rwlock) + : NULL; +# endif /* UNIV_SYNC_DEBUG */ + # endif /* UNIV_PFS_RWLOCK */ block++; } @@ -873,8 +883,6 @@ buf_block_init( block->check_index_page_at_flush = FALSE; block->index = NULL; - block->is_hashed = FALSE; - #ifdef UNIV_DEBUG block->page.in_page_hash = FALSE; block->page.in_zip_hash = FALSE; @@ -897,17 +905,24 @@ buf_block_init( mutex_create(PFS_NOT_INSTRUMENTED, &block->mutex, SYNC_BUF_BLOCK); rw_lock_create(PFS_NOT_INSTRUMENTED, &block->lock, SYNC_LEVEL_VARYING); + +# ifdef UNIV_SYNC_DEBUG + rw_lock_create(PFS_NOT_INSTRUMENTED, + &block->debug_latch, SYNC_NO_ORDER_CHECK); +# endif /* UNIV_SYNC_DEBUG */ + #else /* PFS_SKIP_BUFFER_MUTEX_RWLOCK || PFS_GROUP_BUFFER_SYNC */ mutex_create(buffer_block_mutex_key, &block->mutex, SYNC_BUF_BLOCK); rw_lock_create(buf_block_lock_key, &block->lock, SYNC_LEVEL_VARYING); + +# ifdef UNIV_SYNC_DEBUG + rw_lock_create(buf_block_debug_latch_key, + &block->debug_latch, SYNC_NO_ORDER_CHECK); +# endif /* UNIV_SYNC_DEBUG */ #endif /* PFS_SKIP_BUFFER_MUTEX_RWLOCK || PFS_GROUP_BUFFER_SYNC */ ut_ad(rw_lock_validate(&(block->lock))); -#ifdef UNIV_SYNC_DEBUG - rw_lock_create(buf_block_debug_latch_key, - &block->debug_latch, SYNC_NO_ORDER_CHECK); -#endif /* UNIV_SYNC_DEBUG */ } /********************************************************************//** @@ -974,11 +989,8 @@ buf_chunk_init( for (i = chunk->size; i--; ) { buf_block_init(buf_pool, block, frame); + UNIV_MEM_INVALID(block->frame, UNIV_PAGE_SIZE); -#ifdef HAVE_purify - /* Wipe contents of frame to eliminate a Purify warning */ - memset(block->frame, '\0', UNIV_PAGE_SIZE); -#endif /* Add the block to the free list */ UT_LIST_ADD_LAST(list, buf_pool->free, (&block->page)); @@ -1204,6 +1216,26 @@ buf_pool_free_instance( { buf_chunk_t* chunk; buf_chunk_t* chunks; + buf_page_t* bpage; + + bpage = UT_LIST_GET_LAST(buf_pool->LRU); + while (bpage != NULL) { + buf_page_t* prev_bpage = UT_LIST_GET_PREV(LRU, bpage); + enum buf_page_state state = buf_page_get_state(bpage); + + ut_ad(buf_page_in_file(bpage)); + ut_ad(bpage->in_LRU_list); + + if (state != BUF_BLOCK_FILE_PAGE) { + /* We must not have any dirty block except + when doing a fast shutdown. 
*/ + ut_ad(state == BUF_BLOCK_ZIP_PAGE + || srv_fast_shutdown == 2); + buf_page_free_descriptor(bpage); + } + + bpage = prev_bpage; + } chunks = buf_pool->chunks; chunk = chunks + buf_pool->n_chunks; @@ -1279,108 +1311,47 @@ buf_pool_free( } /********************************************************************//** -Drops adaptive hash index for a buffer pool instance. */ -static -void -buf_pool_drop_hash_index_instance( -/*==============================*/ - buf_pool_t* buf_pool, /*!< in: buffer pool instance */ - ibool* released_search_latch) /*!< out: flag for signalling - whether the search latch was - released */ -{ - buf_chunk_t* chunks = buf_pool->chunks; - buf_chunk_t* chunk = chunks + buf_pool->n_chunks; - - while (--chunk >= chunks) { - ulint i; - buf_block_t* block = chunk->blocks; - - for (i = chunk->size; i--; block++) { - /* block->is_hashed cannot be modified - when we have an x-latch on btr_search_latch; - see the comment in buf0buf.h */ - - if (!block->is_hashed) { - continue; - } - - /* To follow the latching order, we - have to release btr_search_latch - before acquiring block->latch. */ - rw_lock_x_unlock(&btr_search_latch); - /* When we release the search latch, - we must rescan all blocks, because - some may become hashed again. */ - *released_search_latch = TRUE; - - rw_lock_x_lock(&block->lock); - - /* This should be guaranteed by the - callers, which will be holding - btr_search_enabled_mutex. */ - ut_ad(!btr_search_enabled); - - /* Because we did not buffer-fix the - block by calling buf_block_get_gen(), - it is possible that the block has been - allocated for some other use after - btr_search_latch was released above. - We do not care which file page the - block is mapped to. All we want to do - is to drop any hash entries referring - to the page. */ - - /* It is possible that - block->page.state != BUF_FILE_PAGE. - Even that does not matter, because - btr_search_drop_page_hash_index() will - check block->is_hashed before doing - anything. block->is_hashed can only - be set on uncompressed file pages. */ - - btr_search_drop_page_hash_index(block); - - rw_lock_x_unlock(&block->lock); - - rw_lock_x_lock(&btr_search_latch); - - ut_ad(!btr_search_enabled); - } - } -} - -/********************************************************************//** -Drops the adaptive hash index. To prevent a livelock, this function -is only to be called while holding btr_search_latch and while -btr_search_enabled == FALSE. */ +Clears the adaptive hash index on all pages in the buffer pool. 
*/ UNIV_INTERN void -buf_pool_drop_hash_index(void) -/*==========================*/ +buf_pool_clear_hash_index(void) +/*===========================*/ { - ibool released_search_latch; + ulint p; #ifdef UNIV_SYNC_DEBUG ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX)); #endif /* UNIV_SYNC_DEBUG */ ut_ad(!btr_search_enabled); - do { - ulint i; + for (p = 0; p < srv_buf_pool_instances; p++) { + buf_pool_t* buf_pool = buf_pool_from_array(p); + buf_chunk_t* chunks = buf_pool->chunks; + buf_chunk_t* chunk = chunks + buf_pool->n_chunks; - released_search_latch = FALSE; + while (--chunk >= chunks) { + buf_block_t* block = chunk->blocks; + ulint i = chunk->size; - for (i = 0; i < srv_buf_pool_instances; i++) { - buf_pool_t* buf_pool; + for (; i--; block++) { + dict_index_t* index = block->index; - buf_pool = buf_pool_from_array(i); + /* We can set block->index = NULL + when we have an x-latch on btr_search_latch; + see the comment in buf0buf.h */ - buf_pool_drop_hash_index_instance( - buf_pool, &released_search_latch); - } + if (!index) { + /* Not hashed */ + continue; + } - } while (released_search_latch); + block->index = NULL; +# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG + block->n_pointers = 0; +# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ + } + } + } } /********************************************************************//** @@ -1740,38 +1711,6 @@ buf_reset_check_index_page_at_flush( buf_pool_mutex_exit(buf_pool); } -/********************************************************************//** -Returns the current state of is_hashed of a page. FALSE if the page is -not in the pool. NOTE that this operation does not fix the page in the -pool if it is found there. -@return TRUE if page hash index is built in search system */ -UNIV_INTERN -ibool -buf_page_peek_if_search_hashed( -/*===========================*/ - ulint space, /*!< in: space id */ - ulint offset) /*!< in: page number */ -{ - buf_block_t* block; - ibool is_hashed; - buf_pool_t* buf_pool = buf_pool_get(space, offset); - - buf_pool_mutex_enter(buf_pool); - - block = (buf_block_t*) buf_page_hash_get(buf_pool, space, offset); - - if (!block || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) { - is_hashed = FALSE; - } else { - ut_ad(!buf_pool_watch_is_sentinel(buf_pool, &block->page)); - is_hashed = block->is_hashed; - } - - buf_pool_mutex_exit(buf_pool); - - return(is_hashed); -} - #if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG /********************************************************************//** Sets file_page_was_freed TRUE if the page is found in the buffer pool. @@ -1981,7 +1920,6 @@ buf_block_init_low( block->index = NULL; block->n_hash_helps = 0; - block->is_hashed = FALSE; block->n_fields = 1; block->n_bytes = 0; block->left_side = TRUE; diff --git a/storage/innobase/buf/buf0lru.c b/storage/innobase/buf/buf0lru.c index b5ca21e14a6..510f6eefba5 100644 --- a/storage/innobase/buf/buf0lru.c +++ b/storage/innobase/buf/buf0lru.c @@ -273,7 +273,7 @@ next_page: mutex_enter(&((buf_block_t*) bpage)->mutex); is_fixed = bpage->buf_fix_count > 0 - || !((buf_block_t*) bpage)->is_hashed; + || !((buf_block_t*) bpage)->index; mutex_exit(&((buf_block_t*) bpage)->mutex); if (is_fixed) { @@ -405,7 +405,7 @@ scan_again: if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) { /* This is a compressed-only block descriptor. Do nothing. 
*/ - } else if (((buf_block_t*) bpage)->is_hashed) { + } else if (((buf_block_t*) bpage)->index) { ulint page_no; ulint zip_size; @@ -417,7 +417,7 @@ scan_again: mutex_exit(block_mutex); /* Note that the following call will acquire - an S-latch on the page */ + and release an X-latch on the page. */ btr_search_drop_page_hash_when_freed( id, zip_size, page_no); diff --git a/storage/innobase/fsp/fsp0fsp.c b/storage/innobase/fsp/fsp0fsp.c index 3f09732a676..f31e6c03ba1 100644 --- a/storage/innobase/fsp/fsp0fsp.c +++ b/storage/innobase/fsp/fsp0fsp.c @@ -332,7 +332,7 @@ fseg_alloc_free_page_low( inserted there in order, into which direction they go alphabetically: FSP_DOWN, FSP_UP, FSP_NO_DIR */ - mtr_t* mtr); /*!< in: mtr handle */ + mtr_t* mtr); /*!< in/out: mini-transaction */ #endif /* !UNIV_HOTBACKUP */ /**********************************************************************//** @@ -1547,7 +1547,7 @@ fsp_alloc_free_page( ulint zip_size,/*!< in: compressed page size in bytes or 0 for uncompressed pages */ ulint hint, /*!< in: hint of which page would be desirable */ - mtr_t* mtr) /*!< in: mtr handle */ + mtr_t* mtr) /*!< in/out: mini-transaction */ { fsp_header_t* header; fil_addr_t first; @@ -2576,7 +2576,7 @@ fseg_alloc_free_page_low( inserted there in order, into which direction they go alphabetically: FSP_DOWN, FSP_UP, FSP_NO_DIR */ - mtr_t* mtr) /*!< in: mtr handle */ + mtr_t* mtr) /*!< in/out: mini-transaction */ { fsp_header_t* space_header; ulint space_size; @@ -2824,7 +2824,7 @@ fseg_alloc_free_page_general( with fsp_reserve_free_extents, then there is no need to do the check for this individual page */ - mtr_t* mtr) /*!< in: mtr handle */ + mtr_t* mtr) /*!< in/out: mini-transaction */ { fseg_inode_t* inode; ulint space; diff --git a/storage/innobase/ha/ha0ha.c b/storage/innobase/ha/ha0ha.c index 7f11917de0a..65046138275 100644 --- a/storage/innobase/ha/ha0ha.c +++ b/storage/innobase/ha/ha0ha.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -89,40 +89,6 @@ ha_create_func( } /*************************************************************//** -Empties a hash table and frees the memory heaps. */ -UNIV_INTERN -void -ha_clear( -/*=====*/ - hash_table_t* table) /*!< in, own: hash table */ -{ - ulint i; - ulint n; - - ut_ad(table); - ut_ad(table->magic_n == HASH_TABLE_MAGIC_N); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EXCLUSIVE)); -#endif /* UNIV_SYNC_DEBUG */ - -#ifndef UNIV_HOTBACKUP - /* Free the memory heaps. */ - n = table->n_mutexes; - - for (i = 0; i < n; i++) { - mem_heap_free(table->heaps[i]); - } -#endif /* !UNIV_HOTBACKUP */ - - /* Clear the hash table. */ - n = hash_get_n_cells(table); - - for (i = 0; i < n; i++) { - hash_get_nth_cell(table, i)->node = NULL; - } -} - -/*************************************************************//** Inserts an entry into a hash table. If an entry with the same fold number is found, its node is updated to point to the new data, and no new node is inserted. 
If btr_search_enabled is set to FALSE, we will only allow @@ -140,7 +106,7 @@ ha_insert_for_fold_func( #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG buf_block_t* block, /*!< in: buffer block containing the data */ #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ - void* data) /*!< in: data, must not be NULL */ + const rec_t* data) /*!< in: data, must not be NULL */ { hash_cell_t* cell; ha_node_t* node; @@ -153,7 +119,11 @@ ha_insert_for_fold_func( #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG ut_a(block->frame == page_align(data)); #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ +#ifdef UNIV_SYNC_DEBUG + ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX)); +#endif /* UNIV_SYNC_DEBUG */ ASSERT_HASH_MUTEX_OWN(table, fold); + ut_ad(btr_search_enabled); hash = hash_calc_hash(fold, table); @@ -173,7 +143,6 @@ ha_insert_for_fold_func( prev_block->n_pointers--; block->n_pointers++; } - ut_ad(!btr_search_fully_disabled); # endif /* !UNIV_HOTBACKUP */ prev_node->block = block; @@ -186,13 +155,6 @@ ha_insert_for_fold_func( prev_node = prev_node->next; } - /* We are in the process of disabling hash index, do not add - new chain node */ - if (!btr_search_enabled) { - ut_ad(!btr_search_fully_disabled); - return(TRUE); - } - /* We have to allocate a new chain node */ node = mem_heap_alloc(hash_get_heap(table, fold), sizeof(ha_node_t)); @@ -250,6 +212,10 @@ ha_delete_hash_node( { ut_ad(table); ut_ad(table->magic_n == HASH_TABLE_MAGIC_N); +#ifdef UNIV_SYNC_DEBUG + ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX)); +#endif /* UNIV_SYNC_DEBUG */ + ut_ad(btr_search_enabled); #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG # ifndef UNIV_HOTBACKUP if (table->adaptive) { @@ -272,11 +238,11 @@ ha_search_and_update_if_found_func( /*===============================*/ hash_table_t* table, /*!< in/out: hash table */ ulint fold, /*!< in: folded value of the searched data */ - void* data, /*!< in: pointer to the data */ + const rec_t* data, /*!< in: pointer to the data */ #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG buf_block_t* new_block,/*!< in: block containing new_data */ #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ - void* new_data)/*!< in: new pointer to the data */ + const rec_t* new_data)/*!< in: new pointer to the data */ { ha_node_t* node; @@ -286,6 +252,13 @@ ha_search_and_update_if_found_func( #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG ut_a(new_block->frame == page_align(new_data)); #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ +#ifdef UNIV_SYNC_DEBUG + ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX)); +#endif /* UNIV_SYNC_DEBUG */ + + if (!btr_search_enabled) { + return; + } node = ha_search_with_data(table, fold, data); @@ -322,6 +295,10 @@ ha_remove_all_nodes_to_page( ut_ad(table); ut_ad(table->magic_n == HASH_TABLE_MAGIC_N); ASSERT_HASH_MUTEX_OWN(table, fold); +#ifdef UNIV_SYNC_DEBUG + ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX)); +#endif /* UNIV_SYNC_DEBUG */ + ut_ad(btr_search_enabled); node = ha_chain_get_first(table, fold); diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index af730f57cb5..9a8bd67e8a4 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -2580,7 +2580,6 @@ innobase_change_buffering_inited_ok: /* Get the current high water mark format. 
*/ innobase_file_format_max = (char*) trx_sys_file_format_max_get(); - btr_search_fully_disabled = (!btr_search_enabled); DBUG_RETURN(FALSE); error: DBUG_RETURN(TRUE); @@ -5091,8 +5090,7 @@ no_commit: switch (sql_command) { case SQLCOM_LOAD: - if ((trx->duplicates - & (TRX_DUP_IGNORE | TRX_DUP_REPLACE))) { + if (trx->duplicates) { goto set_max_autoinc; } @@ -5269,14 +5267,15 @@ calc_row_difference( /* The field has changed */ ufield = uvect->fields + n_changed; + UNIV_MEM_INVALID(ufield, sizeof *ufield); /* Let us use a dummy dfield to make the conversion from the MySQL column format to the InnoDB format */ - dict_col_copy_type(prebuilt->table->cols + i, - dfield_get_type(&dfield)); - if (n_len != UNIV_SQL_NULL) { + dict_col_copy_type(prebuilt->table->cols + i, + dfield_get_type(&dfield)); + buf = row_mysql_store_col_in_innobase_format( &dfield, (byte*)buf, @@ -5284,7 +5283,7 @@ calc_row_difference( new_mysql_row_col, col_pack_len, dict_table_is_comp(prebuilt->table)); - dfield_copy_data(&ufield->new_val, &dfield); + dfield_copy(&ufield->new_val, &dfield); } else { dfield_set_null(&ufield->new_val); } @@ -5367,8 +5366,7 @@ ha_innobase::update_row( && table->next_number_field && new_row == table->record[0] && thd_sql_command(user_thd) == SQLCOM_INSERT - && (trx->duplicates & (TRX_DUP_IGNORE | TRX_DUP_REPLACE)) - == TRX_DUP_IGNORE) { + && trx->duplicates) { ulonglong auto_inc; ulonglong col_max_value; @@ -5728,6 +5726,7 @@ ha_innobase::index_read( (byte*) key_ptr, (ulint) key_len, prebuilt->trx); + DBUG_ASSERT(prebuilt->search_tuple->n_fields > 0); } else { /* We position the cursor to the last or the first entry in the index */ @@ -7556,6 +7555,7 @@ ha_innobase::records_in_range( mem_heap_t* heap; DBUG_ENTER("records_in_range"); + DBUG_ASSERT(min_key || max_key); ut_a(prebuilt->trx == thd_to_trx(ha_thd())); @@ -7605,6 +7605,9 @@ ha_innobase::records_in_range( (const uchar*) 0), (ulint) (min_key ? min_key->length : 0), prebuilt->trx); + DBUG_ASSERT(min_key + ? range_start->n_fields > 0 + : range_start->n_fields == 0); row_sel_convert_mysql_key_to_innobase( range_end, (byte*) key_val_buff2, @@ -7613,6 +7616,9 @@ ha_innobase::records_in_range( (const uchar*) 0), (ulint) (max_key ? max_key->length : 0), prebuilt->trx); + DBUG_ASSERT(max_key + ? range_end->n_fields > 0 + : range_end->n_fields == 0); mode1 = convert_search_mode_to_innobase(min_key ? min_key->flag : HA_READ_KEY_EXACT); @@ -8865,6 +8871,7 @@ ha_innobase::extra( break; case HA_EXTRA_RESET_STATE: reset_template(prebuilt); + thd_to_trx(ha_thd())->duplicates = 0; break; case HA_EXTRA_NO_KEYREAD: prebuilt->read_just_key = 0; @@ -8882,19 +8889,18 @@ ha_innobase::extra( parameters below. We must not invoke update_thd() either, because the calling threads may change. CAREFUL HERE, OR MEMORY CORRUPTION MAY OCCUR! 
*/ - case HA_EXTRA_IGNORE_DUP_KEY: + case HA_EXTRA_INSERT_WITH_UPDATE: thd_to_trx(ha_thd())->duplicates |= TRX_DUP_IGNORE; break; + case HA_EXTRA_NO_IGNORE_DUP_KEY: + thd_to_trx(ha_thd())->duplicates &= ~TRX_DUP_IGNORE; + break; case HA_EXTRA_WRITE_CAN_REPLACE: thd_to_trx(ha_thd())->duplicates |= TRX_DUP_REPLACE; break; case HA_EXTRA_WRITE_CANNOT_REPLACE: thd_to_trx(ha_thd())->duplicates &= ~TRX_DUP_REPLACE; break; - case HA_EXTRA_NO_IGNORE_DUP_KEY: - thd_to_trx(ha_thd())->duplicates &= - ~(TRX_DUP_IGNORE | TRX_DUP_REPLACE); - break; default:/* Do nothing */ ; } diff --git a/storage/innobase/ibuf/ibuf0ibuf.c b/storage/innobase/ibuf/ibuf0ibuf.c index 7f6acb2b042..0676a7be0f7 100644 --- a/storage/innobase/ibuf/ibuf0ibuf.c +++ b/storage/innobase/ibuf/ibuf0ibuf.c @@ -3955,7 +3955,7 @@ ibuf_insert_to_index_page( ut_ad(ibuf_inside(mtr)); ut_ad(dtuple_check_typed(entry)); - ut_ad(!buf_block_align(page)->is_hashed); + ut_ad(!buf_block_align(page)->index); if (UNIV_UNLIKELY(dict_table_is_comp(index->table) != (ibool)!!page_is_comp(page))) { diff --git a/storage/innobase/include/btr0pcur.h b/storage/innobase/include/btr0pcur.h index f605c476844..140f94466db 100644 --- a/storage/innobase/include/btr0pcur.h +++ b/storage/innobase/include/btr0pcur.h @@ -263,14 +263,6 @@ btr_pcur_commit_specify_mtr( /*========================*/ btr_pcur_t* pcur, /*!< in: persistent cursor */ mtr_t* mtr); /*!< in: mtr to commit */ -/**************************************************************//** -Tests if a cursor is detached: that is the latch mode is BTR_NO_LATCHES. -@return TRUE if detached */ -UNIV_INLINE -ibool -btr_pcur_is_detached( -/*=================*/ - btr_pcur_t* pcur); /*!< in: persistent cursor */ /*********************************************************//** Moves the persistent cursor to the next record in the tree. If no records are left, the cursor stays 'after last in tree'. diff --git a/storage/innobase/include/btr0pcur.ic b/storage/innobase/include/btr0pcur.ic index d86601e5a32..054ce753c7d 100644 --- a/storage/innobase/include/btr0pcur.ic +++ b/storage/innobase/include/btr0pcur.ic @@ -389,38 +389,6 @@ btr_pcur_commit_specify_mtr( } /**************************************************************//** -Sets the pcur latch mode to BTR_NO_LATCHES. */ -UNIV_INLINE -void -btr_pcur_detach( -/*============*/ - btr_pcur_t* pcur) /*!< in: persistent cursor */ -{ - ut_a(pcur->pos_state == BTR_PCUR_IS_POSITIONED); - - pcur->latch_mode = BTR_NO_LATCHES; - - pcur->pos_state = BTR_PCUR_WAS_POSITIONED; -} - -/**************************************************************//** -Tests if a cursor is detached: that is the latch mode is BTR_NO_LATCHES. -@return TRUE if detached */ -UNIV_INLINE -ibool -btr_pcur_is_detached( -/*=================*/ - btr_pcur_t* pcur) /*!< in: persistent cursor */ -{ - if (pcur->latch_mode == BTR_NO_LATCHES) { - - return(TRUE); - } - - return(FALSE); -} - -/**************************************************************//** Sets the old_rec_buf field to NULL. */ UNIV_INLINE void diff --git a/storage/innobase/include/btr0sea.h b/storage/innobase/include/btr0sea.h index 6493689a969..1f920471f7d 100644 --- a/storage/innobase/include/btr0sea.h +++ b/storage/innobase/include/btr0sea.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -141,8 +141,8 @@ btr_search_drop_page_hash_index( for which we know that block->buf_fix_count == 0 */ /********************************************************************//** -Drops a page hash index when a page is freed from a fseg to the file system. -Drops possible hash index if the page happens to be in the buffer pool. */ +Drops a possible page hash index when a page is evicted from the buffer pool +or freed in a file segment. */ UNIV_INTERN void btr_search_drop_page_hash_when_freed( @@ -192,16 +192,6 @@ btr_search_validate(void); # define btr_search_validate() TRUE #endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */ -/** Flag: has the search system been enabled? -Protected by btr_search_latch and btr_search_enabled_mutex. */ -extern char btr_search_enabled; - -/** Flag: whether the search system has completed its disabling process, -It is set to TRUE right after buf_pool_drop_hash_index() in -btr_search_disable(), indicating hash index entries are cleaned up. -Protected by btr_search_latch and btr_search_enabled_mutex. */ -extern ibool btr_search_fully_disabled; - /** The search info struct in an index */ struct btr_search_struct{ ulint ref_count; /*!< Number of blocks in this index tree @@ -270,24 +260,6 @@ struct btr_search_sys_struct{ /** The adaptive hash index */ extern btr_search_sys_t* btr_search_sys; -/** @brief The latch protecting the adaptive search system - -This latch protects the -(1) hash index; -(2) columns of a record to which we have a pointer in the hash index; - -but does NOT protect: - -(3) next record offset field in a record; -(4) next or previous records on the same page. - -Bear in mind (3) and (4) when using the hash index. -*/ -extern rw_lock_t* btr_search_latch_temp; - -/** The latch protecting the adaptive search system */ -#define btr_search_latch (*btr_search_latch_temp) - #ifdef UNIV_SEARCH_PERF_STAT /** Number of successful adaptive hash index lookups */ extern ulint btr_search_n_succ; diff --git a/storage/innobase/include/btr0types.h b/storage/innobase/include/btr0types.h index 07c06fb18d7..5adc858b931 100644 --- a/storage/innobase/include/btr0types.h +++ b/storage/innobase/include/btr0types.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -30,6 +30,7 @@ Created 2/17/1996 Heikki Tuuri #include "rem0types.h" #include "page0types.h" +#include "sync0rw.h" /** Persistent cursor */ typedef struct btr_pcur_struct btr_pcur_t; @@ -38,6 +39,28 @@ typedef struct btr_cur_struct btr_cur_t; /** B-tree search information for the adaptive hash index */ typedef struct btr_search_struct btr_search_t; +/** @brief The latch protecting the adaptive search system + +This latch protects the +(1) hash index; +(2) columns of a record to which we have a pointer in the hash index; + +but does NOT protect: + +(3) next record offset field in a record; +(4) next or previous records on the same page. + +Bear in mind (3) and (4) when using the hash index. 
+*/ +extern rw_lock_t* btr_search_latch_temp; + +/** The latch protecting the adaptive search system */ +#define btr_search_latch (*btr_search_latch_temp) + +/** Flag: has the search system been enabled? +Protected by btr_search_latch. */ +extern char btr_search_enabled; + #ifdef UNIV_BLOB_DEBUG # include "buf0types.h" /** An index->blobs entry for keeping track of off-page column references */ diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index ccebb69a4fe..c0ff7b1766b 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -229,13 +229,11 @@ buf_pool_free( ulint n_instances); /*!< in: numbere of instances to free */ /********************************************************************//** -Drops the adaptive hash index. To prevent a livelock, this function -is only to be called while holding btr_search_latch and while -btr_search_enabled == FALSE. */ +Clears the adaptive hash index on all pages in the buffer pool. */ UNIV_INTERN void -buf_pool_drop_hash_index(void); -/*==========================*/ +buf_pool_clear_hash_index(void); +/*===========================*/ /********************************************************************//** Relocate a buffer control block. Relocates the block on the LRU list @@ -568,17 +566,6 @@ buf_page_peek_if_too_old( /*=====================*/ const buf_page_t* bpage); /*!< in: block to make younger */ /********************************************************************//** -Returns the current state of is_hashed of a page. FALSE if the page is -not in the pool. NOTE that this operation does not fix the page in the -pool if it is found there. -@return TRUE if page hash index is built in search system */ -UNIV_INTERN -ibool -buf_page_peek_if_search_hashed( -/*===========================*/ - ulint space, /*!< in: space id */ - ulint offset);/*!< in: page number */ -/********************************************************************//** Gets the youngest modification log sequence number for a frame. Returns zero if not file page or no modification occurred yet. @return newest modification to page */ @@ -1526,13 +1513,16 @@ struct buf_block_struct{ /* @} */ /** @name Hash search fields - These 6 fields may only be modified when we have + These 5 fields may only be modified when we have an x-latch on btr_search_latch AND - we are holding an s-latch or x-latch on buf_block_struct::lock or - we know that buf_block_struct::buf_fix_count == 0. An exception to this is when we init or create a page - in the buffer pool in buf0buf.c. */ + in the buffer pool in buf0buf.c. + + Another exception is that assigning block->index = NULL + is allowed whenever holding an x-latch on btr_search_latch. */ /* @{ */ @@ -1541,20 +1531,20 @@ struct buf_block_struct{ pointers in the adaptive hash index pointing to this frame */ #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ - unsigned is_hashed:1; /*!< TRUE if hash index has - already been built on this - page; note that it does not - guarantee that the index is - complete, though: there may - have been hash collisions, - record deletions, etc. */ unsigned curr_n_fields:10;/*!< prefix length for hash indexing: number of full fields */ unsigned curr_n_bytes:15;/*!< number of bytes in hash indexing */ unsigned curr_left_side:1;/*!< TRUE or FALSE in hash indexing */ - dict_index_t* index; /*!< Index for which the adaptive - hash index has been created. 
*/ + dict_index_t* index; /*!< Index for which the + adaptive hash index has been + created, or NULL if the page + does not exist in the + index. Note that it does not + guarantee that the index is + complete, though: there may + have been hash collisions, + record deletions, etc. */ /* @} */ # ifdef UNIV_SYNC_DEBUG /** @name Debug fields */ diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h index 7abd3914eda..6e70fbf6f66 100644 --- a/storage/innobase/include/fsp0fsp.h +++ b/storage/innobase/include/fsp0fsp.h @@ -210,7 +210,8 @@ fseg_alloc_free_page_general( with fsp_reserve_free_extents, then there is no need to do the check for this individual page */ - mtr_t* mtr); /*!< in: mtr handle */ + mtr_t* mtr) /*!< in/out: mini-transaction */ + __attribute__((warn_unused_result, nonnull(1,5))); /**********************************************************************//** Reserves free pages from a tablespace. All mini-transactions which may use several pages from the tablespace should call this function beforehand diff --git a/storage/innobase/include/ha0ha.h b/storage/innobase/include/ha0ha.h index 3299000bf3c..8bba564d153 100644 --- a/storage/innobase/include/ha0ha.h +++ b/storage/innobase/include/ha0ha.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -31,13 +31,14 @@ Created 8/18/1994 Heikki Tuuri #include "hash0hash.h" #include "page0types.h" #include "buf0types.h" +#include "rem0types.h" /*************************************************************//** Looks for an element in a hash table. @return pointer to the data of the first hash table node in chain having the fold number, NULL if not found */ UNIV_INLINE -void* +const rec_t* ha_search_and_get_data( /*===================*/ hash_table_t* table, /*!< in: hash table */ @@ -51,11 +52,11 @@ ha_search_and_update_if_found_func( /*===============================*/ hash_table_t* table, /*!< in/out: hash table */ ulint fold, /*!< in: folded value of the searched data */ - void* data, /*!< in: pointer to the data */ + const rec_t* data, /*!< in: pointer to the data */ #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG buf_block_t* new_block,/*!< in: block containing new_data */ #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ - void* new_data);/*!< in: new pointer to the data */ + const rec_t* new_data);/*!< in: new pointer to the data */ #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG /** Looks for an element when we know the pointer to the data and @@ -114,14 +115,6 @@ chosen to be a slightly bigger prime number. #endif /* UNIV_SYNC_DEBUG */ /*************************************************************//** -Empties a hash table and frees the memory heaps. */ -UNIV_INTERN -void -ha_clear( -/*=====*/ - hash_table_t* table); /*!< in, own: hash table */ - -/*************************************************************//** Inserts an entry into a hash table. If an entry with the same fold number is found, its node is updated to point to the new data, and no new node is inserted. 
@@ -138,7 +131,7 @@ ha_insert_for_fold_func( #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG buf_block_t* block, /*!< in: buffer block containing the data */ #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ - void* data); /*!< in: data, must not be NULL */ + const rec_t* data); /*!< in: data, must not be NULL */ #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG /** @@ -174,7 +167,7 @@ ha_search_and_delete_if_found( /*==========================*/ hash_table_t* table, /*!< in: hash table */ ulint fold, /*!< in: folded value of the searched data */ - void* data); /*!< in: pointer to the data */ + const rec_t* data); /*!< in: pointer to the data */ #ifndef UNIV_HOTBACKUP /*****************************************************************//** Removes from the chain determined by fold all nodes whose data pointer @@ -217,7 +210,7 @@ struct ha_node_struct { #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG buf_block_t* block; /*!< buffer block containing the data, or NULL */ #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ - void* data; /*!< pointer to the data */ + const rec_t* data; /*!< pointer to the data */ ulint fold; /*!< fold value for the data */ }; diff --git a/storage/innobase/include/ha0ha.ic b/storage/innobase/include/ha0ha.ic index 734403c4cd9..5656e9b7eba 100644 --- a/storage/innobase/include/ha0ha.ic +++ b/storage/innobase/include/ha0ha.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -25,6 +25,7 @@ Created 8/18/1994 Heikki Tuuri #include "ut0rnd.h" #include "mem0mem.h" +#include "btr0types.h" /***********************************************************//** Deletes a hash node. */ @@ -39,10 +40,10 @@ ha_delete_hash_node( Gets a hash node data. @return pointer to the data */ UNIV_INLINE -void* +const rec_t* ha_node_get_data( /*=============*/ - ha_node_t* node) /*!< in: hash chain node */ + const ha_node_t* node) /*!< in: hash chain node */ { return(node->data); } @@ -57,7 +58,7 @@ ha_node_set_data_func( #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG buf_block_t* block, /*!< in: buffer block containing the data */ #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ - void* data) /*!< in: pointer to the data */ + const rec_t* data) /*!< in: pointer to the data */ { #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG node->block = block; @@ -107,39 +108,10 @@ ha_chain_get_first( /*************************************************************//** Looks for an element in a hash table. -@return pointer to the first hash table node in chain having the fold -number, NULL if not found */ -UNIV_INLINE -ha_node_t* -ha_search( -/*======*/ - hash_table_t* table, /*!< in: hash table */ - ulint fold) /*!< in: folded value of the searched data */ -{ - ha_node_t* node; - - ASSERT_HASH_MUTEX_OWN(table, fold); - - node = ha_chain_get_first(table, fold); - - while (node) { - if (node->fold == fold) { - - return(node); - } - - node = ha_chain_get_next(node); - } - - return(NULL); -} - -/*************************************************************//** -Looks for an element in a hash table. 
@return pointer to the data of the first hash table node in chain having the fold number, NULL if not found */ UNIV_INLINE -void* +const rec_t* ha_search_and_get_data( /*===================*/ hash_table_t* table, /*!< in: hash table */ @@ -148,6 +120,10 @@ ha_search_and_get_data( ha_node_t* node; ASSERT_HASH_MUTEX_OWN(table, fold); +#ifdef UNIV_SYNC_DEBUG + ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_SHARED)); +#endif /* UNIV_SYNC_DEBUG */ + ut_ad(btr_search_enabled); node = ha_chain_get_first(table, fold); @@ -172,12 +148,14 @@ ha_search_with_data( /*================*/ hash_table_t* table, /*!< in: hash table */ ulint fold, /*!< in: folded value of the searched data */ - void* data) /*!< in: pointer to the data */ + const rec_t* data) /*!< in: pointer to the data */ { ha_node_t* node; ASSERT_HASH_MUTEX_OWN(table, fold); + ut_ad(btr_search_enabled); + node = ha_chain_get_first(table, fold); while (node) { @@ -202,11 +180,15 @@ ha_search_and_delete_if_found( /*==========================*/ hash_table_t* table, /*!< in: hash table */ ulint fold, /*!< in: folded value of the searched data */ - void* data) /*!< in: pointer to the data */ + const rec_t* data) /*!< in: pointer to the data */ { ha_node_t* node; ASSERT_HASH_MUTEX_OWN(table, fold); +#ifdef UNIV_SYNC_DEBUG + ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX)); +#endif /* UNIV_SYNC_DEBUG */ + ut_ad(btr_search_enabled); node = ha_search_with_data(table, fold, data); diff --git a/storage/innobase/include/mtr0mtr.ic b/storage/innobase/include/mtr0mtr.ic index 1db4a4bd735..960c25d6051 100644 --- a/storage/innobase/include/mtr0mtr.ic +++ b/storage/innobase/include/mtr0mtr.ic @@ -44,7 +44,6 @@ mtr_start( mtr->log_mode = MTR_LOG_ALL; mtr->modifications = FALSE; - mtr->inside_ibuf = FALSE; mtr->n_log_recs = 0; ut_d(mtr->state = MTR_ACTIVE); diff --git a/storage/innobase/include/row0upd.ic b/storage/innobase/include/row0upd.ic index 11db82f64da..10646241125 100644 --- a/storage/innobase/include/row0upd.ic +++ b/storage/innobase/include/row0upd.ic @@ -28,7 +28,6 @@ Created 12/27/1996 Heikki Tuuri # include "trx0trx.h" # include "trx0undo.h" # include "row0row.h" -# include "btr0sea.h" #endif /* !UNIV_HOTBACKUP */ #include "page0zip.h" diff --git a/storage/innobase/include/sync0sync.h b/storage/innobase/include/sync0sync.h index d9dea0aa63d..b5bf30e758c 100644 --- a/storage/innobase/include/sync0sync.h +++ b/storage/innobase/include/sync0sync.h @@ -670,7 +670,6 @@ or row lock! 
*/ #define SYNC_LOG_FLUSH_ORDER 147 #define SYNC_RECV 168 #define SYNC_WORK_QUEUE 162 -#define SYNC_SEARCH_SYS_CONF 161 /* for assigning btr_search_enabled */ #define SYNC_SEARCH_SYS 160 /* NOTE that if we have a memory heap that can be extended to the buffer pool, its logical level is diff --git a/storage/innobase/page/page0page.c b/storage/innobase/page/page0page.c index 6064d028ae1..102274d66f3 100644 --- a/storage/innobase/page/page0page.c +++ b/storage/innobase/page/page0page.c @@ -215,12 +215,6 @@ page_set_max_trx_id( { page_t* page = buf_block_get_frame(block); #ifndef UNIV_HOTBACKUP - const ibool is_hashed = block->is_hashed; - - if (is_hashed) { - rw_lock_x_lock(&btr_search_latch); - } - ut_ad(!mtr || mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); #endif /* !UNIV_HOTBACKUP */ @@ -241,12 +235,6 @@ page_set_max_trx_id( } else { mach_write_to_8(page + (PAGE_HEADER + PAGE_MAX_TRX_ID), trx_id); } - -#ifndef UNIV_HOTBACKUP - if (is_hashed) { - rw_lock_x_unlock(&btr_search_latch); - } -#endif /* !UNIV_HOTBACKUP */ } /************************************************************//** diff --git a/storage/innobase/row/row0ins.c b/storage/innobase/row/row0ins.c index 2925feb2904..6e311ce2e80 100644 --- a/storage/innobase/row/row0ins.c +++ b/storage/innobase/row/row0ins.c @@ -437,11 +437,9 @@ row_ins_cascade_calc_update_vec( dict_table_t* table = foreign->foreign_table; dict_index_t* index = foreign->foreign_index; upd_t* update; - upd_field_t* ufield; dict_table_t* parent_table; dict_index_t* parent_index; upd_t* parent_update; - upd_field_t* parent_ufield; ulint n_fields_updated; ulint parent_field_no; ulint i; @@ -477,13 +475,15 @@ row_ins_cascade_calc_update_vec( dict_index_get_nth_col_no(parent_index, i)); for (j = 0; j < parent_update->n_fields; j++) { - parent_ufield = parent_update->fields + j; + const upd_field_t* parent_ufield + = &parent_update->fields[j]; if (parent_ufield->field_no == parent_field_no) { ulint min_size; const dict_col_t* col; ulint ufield_len; + upd_field_t* ufield; col = dict_index_get_nth_col(index, i); @@ -496,6 +496,8 @@ row_ins_cascade_calc_update_vec( ufield->field_no = dict_table_get_nth_col_pos( table, dict_col_get_no(col)); + + ufield->orig_len = 0; ufield->exp = NULL; ufield->new_val = parent_ufield->new_val; @@ -983,10 +985,9 @@ row_ins_foreign_check_on_constraint( goto nonstandard_exit_func; } - if ((node->is_delete - && (foreign->type & DICT_FOREIGN_ON_DELETE_SET_NULL)) - || (!node->is_delete - && (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL))) { + if (node->is_delete + ? 
(foreign->type & DICT_FOREIGN_ON_DELETE_SET_NULL) + : (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL)) { /* Build the appropriate update vector which sets foreign->n_fields first fields in rec to SQL NULL */ @@ -995,6 +996,8 @@ row_ins_foreign_check_on_constraint( update->info_bits = 0; update->n_fields = foreign->n_fields; + UNIV_MEM_INVALID(update->fields, + update->n_fields * sizeof *update->fields); for (i = 0; i < foreign->n_fields; i++) { upd_field_t* ufield = &update->fields[i]; @@ -1652,7 +1655,7 @@ row_ins_scan_sec_index_for_duplicate( ulint n_fields_cmp; btr_pcur_t pcur; ulint err = DB_SUCCESS; - unsigned allow_duplicates; + ulint allow_duplicates; mtr_t mtr; mem_heap_t* heap = NULL; ulint offsets_[REC_OFFS_NORMAL_SIZE]; @@ -1683,7 +1686,7 @@ row_ins_scan_sec_index_for_duplicate( btr_pcur_open(index, entry, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur, &mtr); - allow_duplicates = thr_get_trx(thr)->duplicates & TRX_DUP_IGNORE; + allow_duplicates = thr_get_trx(thr)->duplicates; /* Scan index records and check if there is a duplicate */ @@ -1817,7 +1820,7 @@ row_ins_duplicate_error_in_clust( sure that in roll-forward we get the same duplicate errors as in original execution */ - if (trx->duplicates & TRX_DUP_IGNORE) { + if (trx->duplicates) { /* If the SQL-query will update or replace duplicate key we will take X-lock for @@ -1861,7 +1864,7 @@ row_ins_duplicate_error_in_clust( offsets = rec_get_offsets(rec, cursor->index, offsets, ULINT_UNDEFINED, &heap); - if (trx->duplicates & TRX_DUP_IGNORE) { + if (trx->duplicates) { /* If the SQL-query will update or replace duplicate key we will take X-lock for diff --git a/storage/innobase/row/row0mysql.c b/storage/innobase/row/row0mysql.c index d0bedd69842..d06411e09f0 100644 --- a/storage/innobase/row/row0mysql.c +++ b/storage/innobase/row/row0mysql.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2000, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 2000, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -3995,6 +3995,7 @@ end: trx->error_state = DB_SUCCESS; trx_general_rollback_for_mysql(trx, NULL); trx->error_state = DB_SUCCESS; + err = DB_ERROR; goto funct_exit; } diff --git a/storage/innobase/sync/sync0sync.c b/storage/innobase/sync/sync0sync.c index 993d44173ee..81efe50d9bd 100644 --- a/storage/innobase/sync/sync0sync.c +++ b/storage/innobase/sync/sync0sync.c @@ -1216,7 +1216,6 @@ sync_thread_add_level( case SYNC_FILE_FORMAT_TAG: case SYNC_DOUBLEWRITE: case SYNC_SEARCH_SYS: - case SYNC_SEARCH_SYS_CONF: case SYNC_TRX_LOCK_HEAP: case SYNC_KERNEL: case SYNC_IBUF_BITMAP_MUTEX: |