Diffstat (limited to 'storage/innobase/buf/buf0lru.cc')
-rw-r--r-- | storage/innobase/buf/buf0lru.cc | 160
1 file changed, 0 insertions(+), 160 deletions(-)
diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc
index 9feebd9a08e..6e2dba77513 100644
--- a/storage/innobase/buf/buf0lru.cc
+++ b/storage/innobase/buf/buf0lru.cc
@@ -222,166 +222,6 @@ buf_LRU_evict_from_unzip_LRU(
 }
 
 #ifdef BTR_CUR_HASH_ADAPT
-/** Attempts to drop page hash index on a batch of pages belonging to a
-particular space id.
-@param[in]	space_id	space id
-@param[in]	arr		array of page_no
-@param[in]	count		number of entries in array */
-static
-void
-buf_LRU_drop_page_hash_batch(ulint space_id, const ulint* arr, ulint count)
-{
-	ut_ad(count <= BUF_LRU_DROP_SEARCH_SIZE);
-
-	for (const ulint* const end = arr + count; arr != end; ) {
-		/* While our only caller
-		buf_LRU_drop_page_hash_for_tablespace()
-		is being executed for DROP TABLE or similar,
-		the table cannot be evicted from the buffer pool. */
-		btr_search_drop_page_hash_when_freed(
-			page_id_t(space_id, *arr++));
-	}
-}
-
-/******************************************************************//**
-When doing a DROP TABLE/DISCARD TABLESPACE we have to drop all page
-hash index entries belonging to that table. This function tries to
-do that in batch. Note that this is a 'best effort' attempt and does
-not guarantee that ALL hash entries will be removed. */
-static
-void
-buf_LRU_drop_page_hash_for_tablespace(
-/*==================================*/
-	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
-	ulint		id)		/*!< in: space id */
-{
-	ulint*	page_arr = static_cast<ulint*>(ut_malloc_nokey(
-		sizeof(ulint) * BUF_LRU_DROP_SEARCH_SIZE));
-
-	ulint	num_entries = 0;
-
-	buf_pool_mutex_enter(buf_pool);
-
-scan_again:
-	for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool->LRU);
-	     bpage != NULL;
-	     /* No op */) {
-
-		buf_page_t*	prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
-
-		ut_a(buf_page_in_file(bpage));
-
-		if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE
-		    || bpage->id.space() != id
-		    || bpage->io_fix != BUF_IO_NONE) {
-			/* Compressed pages are never hashed.
-			Skip blocks of other tablespaces.
-			Skip I/O-fixed blocks (to be dealt with later). */
-next_page:
-			bpage = prev_bpage;
-			continue;
-		}
-
-		buf_block_t*	block = reinterpret_cast<buf_block_t*>(bpage);
-
-		mutex_enter(&block->mutex);
-
-		/* This debug check uses a dirty read that could
-		theoretically cause false positives while
-		buf_pool_clear_hash_index() is executing.
-		(Other conflicting access paths to the adaptive hash
-		index should not be possible, because when a
-		tablespace is being discarded or dropped, there must
-		be no concurrent access to the contained tables.) */
-		assert_block_ahi_valid(block);
-
-		bool	skip = bpage->buf_fix_count > 0 || !block->index;
-
-		mutex_exit(&block->mutex);
-
-		if (skip) {
-			/* Skip this block, because there are
-			no adaptive hash index entries
-			pointing to it, or because we cannot
-			drop them due to the buffer-fix. */
-			goto next_page;
-		}
-
-		/* Store the page number so that we can drop the hash
-		index in a batch later. */
-		page_arr[num_entries] = bpage->id.page_no();
-		ut_a(num_entries < BUF_LRU_DROP_SEARCH_SIZE);
-		++num_entries;
-
-		if (num_entries < BUF_LRU_DROP_SEARCH_SIZE) {
-			goto next_page;
-		}
-
-		/* Array full. We release the buf_pool->mutex to obey
-		the latching order. */
-		buf_pool_mutex_exit(buf_pool);
-
-		buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
-
-		num_entries = 0;
-
-		buf_pool_mutex_enter(buf_pool);
-
-		/* Note that we released the buf_pool mutex above
-		after reading the prev_bpage during processing of a
-		page_hash_batch (i.e.: when the array was full).
-
-		Because prev_bpage could belong to a compressed-only
-		block, it may have been relocated, and thus the
-		pointer cannot be trusted. Because bpage is of type
-		buf_block_t, it is safe to dereference.
-
-		bpage can change in the LRU list. This is OK because
-		this function is a 'best effort' to drop as many
-		search hash entries as possible and it does not
-		guarantee that ALL such entries will be dropped. */
-
-		/* If, however, bpage has been removed from LRU list
-		to the free list then we should restart the scan.
-		bpage->state is protected by buf_pool mutex. */
-		if (bpage != NULL
-		    && buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
-
-			goto scan_again;
-		}
-	}
-
-	buf_pool_mutex_exit(buf_pool);
-
-	/* Drop any remaining batch of search hashed pages. */
-	buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
-	ut_free(page_arr);
-}
-
-/** Try to drop the adaptive hash index for a tablespace.
-@param[in,out]	table	table
-@return whether anything was dropped */
-bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
-{
-	for (dict_index_t* index = dict_table_get_first_index(table);
-	     index != NULL;
-	     index = dict_table_get_next_index(index)) {
-		if (btr_search_info_get_ref_count(btr_search_get_info(index),
-						  index)) {
-			goto drop_ahi;
-		}
-	}
-
-	return false;
-drop_ahi:
-	ulint	id = table->space;
-	for (ulint i = 0; i < srv_buf_pool_instances; i++) {
-		buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i),
-						      id);
-	}
-
-	return true;
-}
-
 /******************************************************************//**
 While flushing (or removing dirty) pages from a tablespace we don't
 want to hog the CPU and resources. Release the buffer pool and block
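
The removed buf_LRU_drop_page_hash_for_tablespace() above follows a batch-and-release pattern: collect page numbers from the LRU list while holding the buffer pool mutex, and release that mutex before calling into the adaptive hash index code, because the per-page drop must not run under the pool mutex (the latching order). The following is a minimal, standalone C++ sketch of that pattern only, not InnoDB code; Pool, Page, drop_hash_entry and BATCH_SIZE are invented stand-ins for buf_pool_t, buf_page_t, btr_search_drop_page_hash_when_freed() and BUF_LRU_DROP_SEARCH_SIZE.

// drop_hash_sketch.cc: batch-and-release scan, single-threaded sketch.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <list>
#include <mutex>
#include <vector>

constexpr std::size_t BATCH_SIZE = 1024; // stand-in for BUF_LRU_DROP_SEARCH_SIZE

struct Page {
	uint32_t space_id;
	uint32_t page_no;
	bool     has_hash;   // stand-in for block->index != NULL
};

struct Pool {
	std::mutex      mtx; // stand-in for buf_pool->mutex
	std::list<Page> lru; // stand-in for buf_pool->LRU
};

// Stands in for btr_search_drop_page_hash_when_freed(); must be called
// without pool.mtx held, mirroring the latching-order rule above.
static void drop_hash_entry(uint32_t space_id, uint32_t page_no)
{
	std::printf("dropping hash for page (%u, %u)\n",
		    (unsigned) space_id, (unsigned) page_no);
}

static void drop_hash_for_space(Pool& pool, uint32_t space_id)
{
	std::vector<uint32_t> batch;
	batch.reserve(BATCH_SIZE);

	std::unique_lock<std::mutex> lock(pool.mtx);

	// Scan from the tail of the LRU list, like the removed code.
	for (auto it = pool.lru.rbegin(); it != pool.lru.rend(); ++it) {
		if (it->space_id != space_id || !it->has_hash) {
			continue; // other tablespace, or nothing to drop
		}

		batch.push_back(it->page_no);
		if (batch.size() < BATCH_SIZE) {
			continue;
		}

		/* Array full: release the pool mutex, drop the batch,
		then reacquire. The real code additionally re-validated
		bpage afterwards and jumped to scan_again when the block
		had been moved to the free list; a single-threaded
		sketch can simply keep iterating. */
		lock.unlock();
		for (uint32_t page_no : batch) {
			drop_hash_entry(space_id, page_no);
		}
		batch.clear();
		lock.lock();
	}

	lock.unlock();

	// Drop whatever is left over, mirroring the final
	// buf_LRU_drop_page_hash_batch() call after the scan.
	for (uint32_t page_no : batch) {
		drop_hash_entry(space_id, page_no);
	}
}

int main()
{
	Pool pool;
	pool.lru.push_back({1, 3, true});
	pool.lru.push_back({2, 7, true});  // different tablespace: skipped
	pool.lru.push_back({1, 9, false}); // no hash entries: skipped
	pool.lru.push_back({1, 4, true});
	drop_hash_for_space(pool, 1);
}

Like the original, the sketch is best effort: once the mutex has been released, the list may change under a concurrent workload, so it aims to drop as many entries as possible rather than guaranteeing all of them.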
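The removed wrapper, bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t*), also has a fast path worth noting: it consults each index's adaptive hash index reference count first and returns false without scanning any buffer pool when the table has no AHI entries at all. A sketch of that shape, with the same caveats (invented names, not InnoDB code; n_pools stands in for srv_buf_pool_instances):

// fast_path_sketch.cc: skip the expensive scans when there is nothing to drop.
#include <cstdint>
#include <cstdio>
#include <vector>

struct Index {
	uint64_t ahi_ref_count; // stand-in for btr_search_info_get_ref_count()
};

struct Table {
	uint32_t           space_id;
	std::vector<Index> indexes;
};

// Stand-in for the per-buffer-pool scan from the previous sketch.
static void drop_hash_for_space(uint32_t pool_no, uint32_t space_id)
{
	std::printf("scanning pool %u for space %u\n",
		    (unsigned) pool_no, (unsigned) space_id);
}

static const uint32_t n_pools = 4; // stand-in for srv_buf_pool_instances

// Mirrors the removed wrapper: return false without scanning when no
// index has AHI entries, otherwise scan every buffer pool instance.
static bool drop_hash_for_table(const Table& table)
{
	bool any = false;
	for (const Index& index : table.indexes) {
		if (index.ahi_ref_count > 0) {
			any = true;
			break;
		}
	}
	if (!any) {
		return false;
	}
	for (uint32_t i = 0; i < n_pools; i++) {
		drop_hash_for_space(i, table.space_id);
	}
	return true;
}

int main()
{
	Table t{1, {{0}, {2}}}; // second index has 2 AHI references
	std::printf("dropped: %d\n", drop_hash_for_table(t));
}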