author		Marko Mäkelä <marko.makela@mariadb.com>	2018-02-23 22:26:26 +0200
committer	Marko Mäkelä <marko.makela@mariadb.com>	2018-06-25 10:34:37 +0300
commit		8aecd1496a65b5f48f4c1c372bd53be0bb76526b (patch)
tree		0711544cfd8b1ae2625c3a0a38a549d54059df7f
parent		375df6fa54b22305ca8f41962b41f330970fb9e9 (diff)
download	mariadb-git-bb-10.3-MDEV-15058-2.tar.gz
MDEV-15058: Allocate buf_pool statically (branch bb-10.3-MDEV-15058-2)

buf_pool_t::create(): Replaces buf_pool_init().
buf_pool_t::close(): Replaces buf_pool_free().
FIXME: Do not add buf_pool_t::is_initialised()
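In short, call sites switch from a heap-allocated buf_pool pointer to a statically
allocated object. A minimal before/after sketch, with the function and member names
taken from the diff below and the surrounding startup code simplified for illustration:

	/* Before this patch: buf_pool is allocated at startup and used via a pointer. */
	extern buf_pool_t*	buf_pool;

	if (buf_pool_init()) {		/* returns true if creation failed */
		/* handle allocation failure */
	}
	ulint pending = buf_pool->n_pend_reads;
	buf_pool_free();

	/* After this patch: buf_pool is a statically allocated object. */
	extern buf_pool_t	buf_pool;

	if (buf_pool.create()) {	/* still returns true if creation failed */
		/* handle allocation failure */
	}
	pending = buf_pool.n_pend_reads;
	buf_pool.close();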
-rw-r--r--	storage/innobase/btr/btr0cur.cc		10
-rw-r--r--	storage/innobase/btr/btr0sea.cc		24
-rw-r--r--	storage/innobase/buf/buf0buddy.cc	128
-rw-r--r--	storage/innobase/buf/buf0buf.cc		1003
-rw-r--r--	storage/innobase/buf/buf0dblwr.cc	2
-rw-r--r--	storage/innobase/buf/buf0dump.cc	14
-rw-r--r--	storage/innobase/buf/buf0flu.cc		373
-rw-r--r--	storage/innobase/buf/buf0lru.cc		407
-rw-r--r--	storage/innobase/buf/buf0rea.cc		59
-rw-r--r--	storage/innobase/fsp/fsp0fsp.cc		2
-rw-r--r--	storage/innobase/ha/ha0ha.cc		2
-rw-r--r--	storage/innobase/handler/ha_innodb.cc	16
-rw-r--r--	storage/innobase/handler/i_s.cc		40
-rw-r--r--	storage/innobase/include/btr0sea.h	2
-rw-r--r--	storage/innobase/include/buf0buddy.h	12
-rw-r--r--	storage/innobase/include/buf0buf.h	165
-rw-r--r--	storage/innobase/include/buf0buf.ic	57
-rw-r--r--	storage/innobase/include/buf0flu.ic	8
-rw-r--r--	storage/innobase/include/buf0lru.h	37
-rw-r--r--	storage/innobase/include/buf0types.h	3
-rw-r--r--	storage/innobase/include/srv0srv.h	10
-rw-r--r--	storage/innobase/lock/lock0lock.cc	6
-rw-r--r--	storage/innobase/log/log0log.cc		2
-rw-r--r--	storage/innobase/srv/srv0mon.cc		28
-rw-r--r--	storage/innobase/srv/srv0srv.cc		34
-rw-r--r--	storage/innobase/srv/srv0start.cc	8
-rw-r--r--	storage/innobase/sync/sync0debug.cc	4
27 files changed, 1214 insertions(+), 1242 deletions(-)
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 2d2a161c81f..05d5a664658 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -1155,7 +1155,7 @@ btr_cur_search_to_nth_level_func(
Free blocks and read IO bandwidth should be prior
for them, when the history list is glowing huge. */
if (lock_intention == BTR_INTENTION_DELETE
- && buf_pool->n_pend_reads
+ && buf_pool.n_pend_reads
&& trx_sys.history_size() > BTR_CUR_FINE_HISTORY_LENGTH) {
mtr_x_lock(dict_index_get_lock(index), mtr);
} else if (dict_index_is_spatial(index)
@@ -2291,7 +2291,7 @@ btr_cur_open_at_index_side_func(
Free blocks and read IO bandwidth should be prior
for them, when the history list is glowing huge. */
if (lock_intention == BTR_INTENTION_DELETE
- && buf_pool->n_pend_reads
+ && buf_pool.n_pend_reads
&& trx_sys.history_size() > BTR_CUR_FINE_HISTORY_LENGTH) {
mtr_x_lock(dict_index_get_lock(index), mtr);
} else {
@@ -2636,7 +2636,7 @@ btr_cur_open_at_rnd_pos_func(
Free blocks and read IO bandwidth should be prior
for them, when the history list is glowing huge. */
if (lock_intention == BTR_INTENTION_DELETE
- && buf_pool->n_pend_reads
+ && buf_pool.n_pend_reads
&& trx_sys.history_size() > BTR_CUR_FINE_HISTORY_LENGTH) {
mtr_x_lock(dict_index_get_lock(index), mtr);
} else {
@@ -6931,7 +6931,7 @@ btr_blob_free(
mtr_commit(mtr);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
/* Only free the block if it is still allocated to
the same file page. */
@@ -6950,7 +6950,7 @@ btr_blob_free(
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
/** Helper class used while writing blob pages, during insert or update. */
diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc
index 3694881a7dc..9d35546b6fd 100644
--- a/storage/innobase/btr/btr0sea.cc
+++ b/storage/innobase/btr/btr0sea.cc
@@ -408,12 +408,12 @@ void btr_search_disable(bool need_mutex)
/** Enable the adaptive hash search system. */
void btr_search_enable()
{
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
if (srv_buf_pool_old_size != srv_buf_pool_size) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return;
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
btr_search_x_lock_all();
btr_search_enabled = true;
@@ -1060,7 +1060,7 @@ fail:
/* Increment the page get statistics though we did not really
fix the page: for user info only */
- ++buf_pool->stat.n_page_gets;
+ ++buf_pool.stat.n_page_gets;
return(TRUE);
}
@@ -1069,7 +1069,7 @@ fail:
@param[in,out] block block containing index page, s- or x-latched, or an
index page for which we know that
block->buf_fix_count == 0 or it is an index page which
- has already been removed from the buf_pool->page_hash
+ has already been removed from the buf_pool.page_hash
i.e.: it is in state BUF_BLOCK_REMOVE_HASH */
void btr_search_drop_page_hash_index(buf_block_t* block)
{
@@ -1974,7 +1974,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
rec_offs_init(offsets_);
btr_search_x_lock_all();
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
cell_count = hash_get_n_cells(
btr_search_sys->hash_tables[hash_table_id]);
@@ -1984,13 +1984,13 @@ btr_search_hash_table_validate(ulint hash_table_id)
give other queries a chance to run. */
if ((i != 0) && ((i % chunk_size) == 0)) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
btr_search_x_unlock_all();
os_thread_yield();
btr_search_x_lock_all();
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
ulint curr_cell_count = hash_get_n_cells(
btr_search_sys->hash_tables[hash_table_id]);
@@ -2034,7 +2034,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
/* When a block is being freed,
buf_LRU_search_and_free_block() first
removes the block from
- buf_pool->page_hash by calling
+ buf_pool.page_hash by calling
buf_LRU_block_remove_hashed_page().
After that, it invokes
btr_search_drop_page_hash_index() to
@@ -2097,13 +2097,13 @@ btr_search_hash_table_validate(ulint hash_table_id)
/* We release search latches every once in a while to
give other queries a chance to run. */
if (i != 0) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
btr_search_x_unlock_all();
os_thread_yield();
btr_search_x_lock_all();
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
ulint curr_cell_count = hash_get_n_cells(
btr_search_sys->hash_tables[hash_table_id]);
@@ -2126,7 +2126,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
btr_search_x_unlock_all();
if (UNIV_LIKELY_NULL(heap)) {
diff --git a/storage/innobase/buf/buf0buddy.cc b/storage/innobase/buf/buf0buddy.cc
index bdb3e96cf37..7842992a552 100644
--- a/storage/innobase/buf/buf0buddy.cc
+++ b/storage/innobase/buf/buf0buddy.cc
@@ -185,26 +185,26 @@ struct CheckZipFree {
static void buf_buddy_list_validate(ulint i)
{
CheckZipFree check(i);
- ut_list_validate(buf_pool->zip_free[i], check);
+ ut_list_validate(buf_pool.zip_free[i], check);
}
/**********************************************************************//**
Debug function to validate that a buffer is indeed free i.e.: in the
zip_free[].
@param[in] buf block to check
-@param[in] i index of buf_pool->zip_free[]
+@param[in] i index of buf_pool.zip_free[]
@return true if free */
static bool buf_buddy_check_free(const buf_buddy_free_t* buf, ulint i)
{
const ulint size = BUF_BUDDY_LOW << i;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(!ut_align_offset(buf, size));
ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
buf_buddy_free_t* itr;
- for (itr = UT_LIST_GET_FIRST(buf_pool->zip_free[i]);
+ for (itr = UT_LIST_GET_FIRST(buf_pool.zip_free[i]);
itr && itr != buf;
itr = UT_LIST_GET_NEXT(list, itr)) {
}
@@ -224,7 +224,7 @@ buf_buddy_is_free(
/*==============*/
buf_buddy_free_t* buf, /*!< in: block to check */
ulint i) /*!< in: index of
- buf_pool->zip_free[] */
+ buf_pool.zip_free[] */
{
#ifdef UNIV_DEBUG
const ulint size = BUF_BUDDY_LOW << i;
@@ -262,51 +262,51 @@ buf_buddy_is_free(
/** Add a block to the head of the appropriate buddy free list.
@param[in,out] buf block to be freed
-@param[in] i index of buf_pool->zip_free[] */
+@param[in] i index of buf_pool.zip_free[] */
UNIV_INLINE
void
buf_buddy_add_to_free(buf_buddy_free_t* buf, ulint i)
{
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(buf_pool->zip_free[i].start != buf);
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(buf_pool.zip_free[i].start != buf);
buf_buddy_stamp_free(buf, i);
- UT_LIST_ADD_FIRST(buf_pool->zip_free[i], buf);
+ UT_LIST_ADD_FIRST(buf_pool.zip_free[i], buf);
ut_d(buf_buddy_list_validate(i));
}
/** Remove a block from the appropriate buddy free list.
@param[in,out] buf block to be freed
-@param[in] i index of buf_pool->zip_free[] */
+@param[in] i index of buf_pool.zip_free[] */
UNIV_INLINE
void
buf_buddy_remove_from_free(buf_buddy_free_t* buf, ulint i)
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_buddy_check_free(buf, i));
- UT_LIST_REMOVE(buf_pool->zip_free[i], buf);
+ UT_LIST_REMOVE(buf_pool.zip_free[i], buf);
buf_buddy_stamp_nonfree(buf, i);
}
-/** Try to allocate a block from buf_pool->zip_free[].
-@param[in] i index of buf_pool->zip_free[]
-@return allocated block, or NULL if buf_pool->zip_free[] was empty */
+/** Try to allocate a block from buf_pool.zip_free[].
+@param[in] i index of buf_pool.zip_free[]
+@return allocated block, or NULL if buf_pool.zip_free[] was empty */
static buf_buddy_free_t* buf_buddy_alloc_zip(ulint i)
{
buf_buddy_free_t* buf;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_a(i < BUF_BUDDY_SIZES);
ut_a(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
ut_d(buf_buddy_list_validate(i));
- buf = UT_LIST_GET_FIRST(buf_pool->zip_free[i]);
+ buf = UT_LIST_GET_FIRST(buf_pool.zip_free[i]);
- if (buf_pool->curr_size < buf_pool->old_size
- && UT_LIST_GET_LEN(buf_pool->withdraw)
- < buf_pool->withdraw_target) {
+ if (buf_pool.curr_size < buf_pool.old_size
+ && UT_LIST_GET_LEN(buf_pool.withdraw)
+ < buf_pool.withdraw_target) {
while (buf != NULL
&& buf_frame_will_be_withdrawn(
@@ -357,11 +357,11 @@ buf_buddy_block_free(void* buf)
buf_page_t* bpage;
buf_block_t* block;
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_a(!ut_align_offset(buf, srv_page_size));
- HASH_SEARCH(hash, buf_pool->zip_hash, fold, buf_page_t*, bpage,
+ HASH_SEARCH(hash, buf_pool.zip_hash, fold, buf_page_t*, bpage,
ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_MEMORY
&& bpage->in_zip_hash && !bpage->in_page_hash),
((buf_block_t*) bpage)->frame == buf);
@@ -370,7 +370,7 @@ buf_buddy_block_free(void* buf)
ut_ad(!bpage->in_page_hash);
ut_ad(bpage->in_zip_hash);
ut_d(bpage->in_zip_hash = FALSE);
- HASH_DELETE(buf_page_t, hash, buf_pool->zip_hash, fold, bpage);
+ HASH_DELETE(buf_page_t, hash, buf_pool.zip_hash, fold, bpage);
ut_d(memset(buf, 0, srv_page_size));
UNIV_MEM_INVALID(buf, srv_page_size);
@@ -380,8 +380,8 @@ buf_buddy_block_free(void* buf)
buf_LRU_block_free_non_file_page(block);
buf_page_mutex_exit(block);
- ut_ad(buf_pool->buddy_n_frames > 0);
- ut_d(buf_pool->buddy_n_frames--);
+ ut_ad(buf_pool.buddy_n_frames > 0);
+ ut_d(buf_pool.buddy_n_frames--);
}
/**********************************************************************//**
@@ -393,8 +393,8 @@ buf_buddy_block_register(
buf_block_t* block) /*!< in: buffer frame to allocate */
{
const ulint fold = BUF_POOL_ZIP_FOLD(block);
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_ad(buf_block_get_state(block) == BUF_BLOCK_READY_FOR_USE);
buf_block_set_state(block, BUF_BLOCK_MEMORY);
@@ -405,15 +405,15 @@ buf_buddy_block_register(
ut_ad(!block->page.in_page_hash);
ut_ad(!block->page.in_zip_hash);
ut_d(block->page.in_zip_hash = TRUE);
- HASH_INSERT(buf_page_t, hash, buf_pool->zip_hash, fold, &block->page);
+ HASH_INSERT(buf_page_t, hash, buf_pool.zip_hash, fold, &block->page);
- ut_d(buf_pool->buddy_n_frames++);
+ ut_d(buf_pool.buddy_n_frames++);
}
/** Allocate a block from a bigger object.
@param[in] buf a block that is free to use
-@param[in] i index of buf_pool->zip_free[]
-@param[in] j size of buf as an index of buf_pool->zip_free[]
+@param[in] i index of buf_pool.zip_free[]
+@param[in] j size of buf as an index of buf_pool.zip_free[]
@return allocated block */
static
void*
@@ -442,15 +442,15 @@ buf_buddy_alloc_from(void* buf, ulint i, ulint j)
}
/** Allocate a block.
-@param[in] i index of buf_pool->zip_free[] or BUF_BUDDY_SIZES
-@param[out] lru whether buf_pool->mutex was temporarily released
+@param[in] i index of buf_pool.zip_free[] or BUF_BUDDY_SIZES
+@param[out] lru whether buf_pool.mutex was temporarily released
@return allocated block, never NULL */
byte* buf_buddy_alloc_low(ulint i, bool& lru)
{
buf_block_t* block;
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
if (i < BUF_BUDDY_SIZES) {
@@ -462,7 +462,7 @@ byte* buf_buddy_alloc_low(ulint i, bool& lru)
}
}
- /* Try allocating from the buf_pool->free list. */
+ /* Try allocating from the buf_pool.free list. */
block = buf_LRU_get_free_only();
if (block) {
@@ -470,10 +470,10 @@ byte* buf_buddy_alloc_low(ulint i, bool& lru)
}
/* Try replacing an uncompressed page in the buffer pool. */
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
block = buf_LRU_get_free_block();
lru = true;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
alloc_big:
buf_buddy_block_register(block);
@@ -482,7 +482,7 @@ alloc_big:
block->frame, i, BUF_BUDDY_SIZES);
func_exit:
- buf_pool->buddy_stat[i].used++;
+ buf_pool.buddy_stat[i].used++;
return reinterpret_cast<byte*>(block);
}
@@ -490,7 +490,7 @@ func_exit:
function will release and lock it again.
@param[in] src block to relocate
@param[in] dst free block to relocated to
-@param[in] i index of buf_pool->zip_free[]
+@param[in] i index of buf_pool.zip_free[]
@param[in] force true if we must relocated always
@return true if relocated */
static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
@@ -500,8 +500,8 @@ static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
ulint space;
ulint offset;
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_ad(!ut_align_offset(src, size));
ut_ad(!ut_align_offset(dst, size));
ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
@@ -530,7 +530,7 @@ static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
if (!bpage || bpage->zip.data != src) {
/* The block has probably been freshly
allocated by buf_LRU_get_free_block() but not
- added to buf_pool->page_hash yet. Obviously,
+ added to buf_pool.page_hash yet. Obviously,
it cannot be relocated. */
rw_lock_x_unlock(hash_lock);
@@ -542,7 +542,7 @@ static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
/* It might be just uninitialized page.
We should search from LRU list also. */
- bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
+ bpage = UT_LIST_GET_FIRST(buf_pool.LRU);
while (bpage != NULL) {
if (bpage->zip.data == src) {
hash_lock = buf_page_hash_lock_get(bpage->id);
@@ -592,7 +592,7 @@ static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
buf_buddy_mem_invalid(
reinterpret_cast<buf_buddy_free_t*>(src), i);
- buf_buddy_stat_t* buddy_stat = &buf_pool->buddy_stat[i];
+ buf_buddy_stat_t* buddy_stat = &buf_pool.buddy_stat[i];
buddy_stat->relocated++;
buddy_stat->relocated_usec += ut_time_us(NULL) - usec;
return(true);
@@ -607,18 +607,18 @@ static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
/** Deallocate a block.
@param[in] buf block to be freed, must not be pointed to
by the buffer pool
-@param[in] i index of buf_pool->zip_free[], or BUF_BUDDY_SIZES */
+@param[in] i index of buf_pool.zip_free[], or BUF_BUDDY_SIZES */
void buf_buddy_free_low(void* buf, ulint i)
{
buf_buddy_free_t* buddy;
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_ad(i <= BUF_BUDDY_SIZES);
ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
- ut_ad(buf_pool->buddy_stat[i].used > 0);
-
- buf_pool->buddy_stat[i].used--;
+ ut_ad(buf_pool.buddy_stat[i].used > 0);
+ /* FIXME: use atomic operation */
+ buf_pool.buddy_stat[i].used--;
recombine:
UNIV_MEM_ALLOC(buf, BUF_BUDDY_LOW << i);
@@ -634,8 +634,8 @@ recombine:
/* Do not recombine blocks if there are few free blocks.
We may waste up to 15360*max_len bytes to free blocks
(1024 + 2048 + 4096 + 8192 = 15360) */
- if (UT_LIST_GET_LEN(buf_pool->zip_free[i]) < 16
- && buf_pool->curr_size >= buf_pool->old_size) {
+ if (UT_LIST_GET_LEN(buf_pool.zip_free[i]) < 16
+ && buf_pool.curr_size >= buf_pool.old_size) {
goto func_exit;
}
@@ -661,7 +661,7 @@ buddy_is_free:
/* The buddy is not free. Is there a free block of
this size? */
if (buf_buddy_free_t* zip_buf =
- UT_LIST_GET_FIRST(buf_pool->zip_free[i])) {
+ UT_LIST_GET_FIRST(buf_pool.zip_free[i])) {
/* Remove the block from the free list, because
a successful buf_buddy_relocate() will overwrite
@@ -699,8 +699,8 @@ buf_buddy_realloc(void* buf, ulint size)
buf_block_t* block = NULL;
ulint i = buf_buddy_get_slot(size);
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_ad(i <= BUF_BUDDY_SIZES);
ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
@@ -710,7 +710,7 @@ buf_buddy_realloc(void* buf, ulint size)
}
if (block == NULL) {
- /* Try allocating from the buf_pool->free list. */
+ /* Try allocating from the buf_pool.free list. */
block = buf_LRU_get_free_only();
if (block == NULL) {
@@ -724,7 +724,7 @@ buf_buddy_realloc(void* buf, ulint size)
block->frame, i, BUF_BUDDY_SIZES));
}
- buf_pool->buddy_stat[i].used++;
+ buf_pool.buddy_stat[i].used++;
/* Try to relocate the buddy of buf to the free block. */
if (buf_buddy_relocate(buf, block, i, true)) {
@@ -741,12 +741,12 @@ buf_buddy_realloc(void* buf, ulint size)
/** Combine all pairs of free buddies. */
void buf_buddy_condense_free()
{
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(buf_pool->curr_size < buf_pool->old_size);
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(buf_pool.curr_size < buf_pool.old_size);
- for (ulint i = 0; i < UT_ARR_SIZE(buf_pool->zip_free); ++i) {
+ for (ulint i = 0; i < UT_ARR_SIZE(buf_pool.zip_free); ++i) {
buf_buddy_free_t* buf =
- UT_LIST_GET_FIRST(buf_pool->zip_free[i]);
+ UT_LIST_GET_FIRST(buf_pool.zip_free[i]);
/* seek to withdraw target */
while (buf != NULL
@@ -785,7 +785,7 @@ void buf_buddy_condense_free()
/* Both buf and buddy are free.
Try to combine them. */
buf_buddy_remove_from_free(buf, i);
- buf_pool->buddy_stat[i].used++;
+ buf_pool.buddy_stat[i].used++;
buf_buddy_free_low(buf, i);
}
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index ce8f9de4f49..b010701cab7 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -146,21 +146,18 @@ in the file along with the file page, resides in the control block.
The buffer buf_pool contains a single mutex which protects all the
control data structures of the buf_pool. The content of a buffer frame is
protected by a separate read-write lock in its control block, though.
-These locks can be locked and unlocked without owning the buf_pool->mutex.
+These locks can be locked and unlocked without owning the buf_pool.mutex.
The OS events in the buf_pool struct can be waited for without owning the
-buf_pool->mutex.
+buf_pool.mutex.
-The buf_pool->mutex is a hot-spot in main memory, causing a lot of
+The buf_pool.mutex is a hot-spot in main memory, causing a lot of
memory bus traffic on multiprocessor systems when processors
-alternately access the mutex. On our Pentium, the mutex is accessed
-maybe every 10 microseconds. We gave up the solution to have mutexes
-for each control block, for instance, because it seemed to be
-complicated.
+alternately access the mutex.
-A solution to reduce mutex contention of the buf_pool->mutex is to
+A solution to reduce mutex contention of the buf_pool.mutex is to
create a separate mutex for the page hash table. On Pentium,
accessing the hash table takes 2 microseconds, about half
-of the total buf_pool->mutex hold time.
+of the total buf_pool.mutex hold time.
Control blocks
--------------
@@ -204,7 +201,7 @@ in the database, using tables whose size is a power of 2.
There are several lists of control blocks.
-The free list (buf_pool->free) contains blocks which are currently not
+The free list (buf_pool.free) contains blocks which are currently not
used.
The common LRU list contains all the blocks holding a file page
@@ -230,20 +227,20 @@ holds. The blocks in unzip_LRU will be in same order as they are in
the common LRU list. That is, each manipulation of the common LRU
list will result in the same manipulation of the unzip_LRU list.
-The chain of modified blocks (buf_pool->flush_list) contains the blocks
+The chain of modified blocks (buf_pool.flush_list) contains the blocks
holding file pages that have been modified in the memory
but not written to disk yet. The block with the oldest modification
which has not yet been written to disk is at the end of the chain.
-The access to this list is protected by buf_pool->flush_list_mutex.
+The access to this list is protected by buf_pool.flush_list_mutex.
-The chain of unmodified compressed blocks (buf_pool->zip_clean)
+The chain of unmodified compressed blocks (buf_pool.zip_clean)
contains the control blocks (buf_page_t) of those compressed pages
-that are not in buf_pool->flush_list and for which no uncompressed
+that are not in buf_pool.flush_list and for which no uncompressed
page has been allocated in the buffer pool. The control blocks for
uncompressed pages are accessible via buf_block_t objects that are
-reachable via buf_pool->chunks[].
+reachable via buf_pool.chunks[].
-The chains of free memory blocks (buf_pool->zip_free[]) are used by
+The chains of free memory blocks (buf_pool.zip_free[]) are used by
the buddy allocator (buf0buddy.cc) to keep track of currently unused
memory blocks of size sizeof(buf_page_t)..srv_page_size / 2. These
blocks are inside the srv_page_size-sized memory blocks of type
@@ -314,8 +311,8 @@ static const ulint BUF_READ_AHEAD_PAGES = 64;
read-ahead buffer. (Divide buf_pool size by this amount) */
static const ulint BUF_READ_AHEAD_PORTION = 32;
-/** The buffer pool of the database */
-buf_pool_t* buf_pool;
+/** The InnoDB buffer pool */
+buf_pool_t buf_pool;
/** true when resizing buffer pool is in the critical path. */
volatile bool buf_pool_resizing;
@@ -386,8 +383,8 @@ on the io_type */
@return reserved buffer slot */
static buf_tmp_buffer_t* buf_pool_reserve_tmp_slot()
{
- for (ulint i = 0; i < buf_pool->tmp_arr->n_slots; i++) {
- buf_tmp_buffer_t* slot = &buf_pool->tmp_arr->slots[i];
+ for (ulint i = 0; i < buf_pool.tmp_arr->n_slots; i++) {
+ buf_tmp_buffer_t* slot = &buf_pool.tmp_arr->slots[i];
if (slot->acquire()) {
return slot;
}
@@ -533,7 +530,7 @@ decrypt_failed:
lsn_t
buf_pool_get_oldest_modification()
{
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
buf_page_t* bpage;
@@ -541,14 +538,14 @@ buf_pool_get_oldest_modification()
list. We would only need to write out temporary pages if the
page is about to be evicted from the buffer pool, and the page
contents is still needed (the page has not been freed). */
- for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
+ for (bpage = UT_LIST_GET_LAST(buf_pool.flush_list);
bpage != NULL && fsp_is_system_temporary(bpage->id.space());
bpage = UT_LIST_GET_PREV(list, bpage)) {
ut_ad(bpage->in_flush_list);
}
lsn_t oldest_lsn = bpage ? bpage->oldest_modification : 0;
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
/* The returned answer may be out of date: the flush_list can
change after the mutex has been released. */
@@ -1158,14 +1155,14 @@ buf_madvise_do_dump()
ret+= madvise(recv_sys->buf, recv_sys->len, MADV_DODUMP);
}
- mutex_enter(&buf_pool->mutex);
- buf_chunk_t* chunk = buf_pool->chunks;
+ mutex_enter(&buf_pool.mutex);
+ buf_chunk_t* chunk = buf_pool.chunks;
- for (ulint n = buf_pool->n_chunks; n--; chunk++) {
+ for (ulint n = buf_pool.n_chunks; n--; chunk++) {
ret+= madvise(chunk->mem, chunk->mem_size(), MADV_DODUMP);
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return ret;
}
#endif
@@ -1492,7 +1489,7 @@ static buf_chunk_t* buf_chunk_init(buf_chunk_t* chunk, ulint mem_size)
DBUG_EXECUTE_IF("ib_buf_chunk_init_fails", return(NULL););
- chunk->mem = buf_pool->allocator.allocate_large(mem_size,
+ chunk->mem = buf_pool.allocator.allocate_large(mem_size,
&chunk->mem_pfx, true);
if (UNIV_UNLIKELY(chunk->mem == NULL)) {
@@ -1537,7 +1534,7 @@ static buf_chunk_t* buf_chunk_init(buf_chunk_t* chunk, ulint mem_size)
UNIV_MEM_INVALID(block->frame, srv_page_size);
/* Add the block to the free list */
- UT_LIST_ADD_LAST(buf_pool->free, &block->page);
+ UT_LIST_ADD_LAST(buf_pool.free, &block->page);
ut_d(block->page.in_free_list = TRUE);
@@ -1586,10 +1583,10 @@ buf_chunk_contains_zip(
@retval NULL if not found */
buf_block_t* buf_pool_contains_zip(const void* data)
{
- buf_chunk_t* chunk = buf_pool->chunks;
+ buf_chunk_t* chunk = buf_pool.chunks;
- ut_ad(mutex_own(&buf_pool->mutex));
- for (ulint n = buf_pool->n_chunks; n--; chunk++) {
+ ut_ad(mutex_own(&buf_pool.mutex));
+ for (ulint n = buf_pool.n_chunks; n--; chunk++) {
if (buf_block_t* block = buf_chunk_contains_zip(chunk, data)) {
return(block);
}
@@ -1665,64 +1662,60 @@ buf_chunk_not_freed(
/** Create the buffer pool.
@return whether the creation failed */
-bool buf_pool_init()
+bool buf_pool_t::create()
{
ulint i;
ulint chunk_size;
buf_chunk_t* chunk;
+ ut_ad(this == &buf_pool);
+ ut_ad(!m_initialised);
ut_ad(srv_buf_pool_size % srv_buf_pool_chunk_unit == 0);
- ut_ad(!buf_pool);
buf_pool_resizing = false;
buf_pool_withdrawing = false;
buf_withdraw_clock = 0;
buf_chunk_map_reg = UT_NEW_NOKEY(buf_pool_chunk_map_t());
- buf_pool = (buf_pool_t*) ut_zalloc_nokey(sizeof *buf_pool);
/* 1. Initialize general fields
------------------------------- */
- mutex_create(LATCH_ID_BUF_POOL, &buf_pool->mutex);
+ mutex_create(LATCH_ID_BUF_POOL, &mutex);
+ mutex_create(LATCH_ID_BUF_POOL_ZIP, &zip_mutex);
- mutex_create(LATCH_ID_BUF_POOL_ZIP, &buf_pool->zip_mutex);
+ new(&allocator) ut_allocator<unsigned char>(mem_key_buf_buf_pool);
- new(&buf_pool->allocator)
- ut_allocator<unsigned char>(mem_key_buf_buf_pool);
-
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&mutex);
if (srv_buf_pool_size > 0) {
- buf_pool->n_chunks
- = srv_buf_pool_size / srv_buf_pool_chunk_unit;
+ n_chunks = srv_buf_pool_size / srv_buf_pool_chunk_unit;
chunk_size = srv_buf_pool_chunk_unit;
- buf_pool->chunks =
- reinterpret_cast<buf_chunk_t*>(ut_zalloc_nokey(
- buf_pool->n_chunks * sizeof(*chunk)));
- buf_pool->chunks_old = NULL;
+ chunks = reinterpret_cast<buf_chunk_t*>(
+ ut_zalloc_nokey(n_chunks * sizeof *chunk));
+ chunks_old = NULL;
- UT_LIST_INIT(buf_pool->LRU, &buf_page_t::LRU);
- UT_LIST_INIT(buf_pool->free, &buf_page_t::list);
- UT_LIST_INIT(buf_pool->withdraw, &buf_page_t::list);
- buf_pool->withdraw_target = 0;
- UT_LIST_INIT(buf_pool->flush_list, &buf_page_t::list);
- UT_LIST_INIT(buf_pool->unzip_LRU, &buf_block_t::unzip_LRU);
+ UT_LIST_INIT(LRU, &buf_page_t::LRU);
+ UT_LIST_INIT(free, &buf_page_t::list);
+ UT_LIST_INIT(withdraw, &buf_page_t::list);
+ withdraw_target = 0;
+ UT_LIST_INIT(flush_list, &buf_page_t::list);
+ UT_LIST_INIT(unzip_LRU, &buf_block_t::unzip_LRU);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
- UT_LIST_INIT(buf_pool->zip_clean, &buf_page_t::list);
+ UT_LIST_INIT(zip_clean, &buf_page_t::list);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
- for (i = 0; i < UT_ARR_SIZE(buf_pool->zip_free); ++i) {
+ for (i = 0; i < UT_ARR_SIZE(zip_free); ++i) {
UT_LIST_INIT(
- buf_pool->zip_free[i], &buf_buddy_free_t::list);
+ zip_free[i], &buf_buddy_free_t::list);
}
- buf_pool->curr_size = 0;
- chunk = buf_pool->chunks;
+ curr_size = 0;
+ chunk = chunks;
do {
if (!buf_chunk_init(chunk, chunk_size)) {
- while (--chunk >= buf_pool->chunks) {
+ while (--chunk >= chunks) {
buf_block_t* block = chunk->blocks;
for (i = chunk->size; i--; block++) {
@@ -1730,31 +1723,30 @@ bool buf_pool_init()
rw_lock_free(&block->lock);
ut_d(rw_lock_free(
- &block->debug_latch));
+ &block->debug_latch));
}
- buf_pool->allocator.deallocate_large(
+ allocator.deallocate_large(
chunk->mem, &chunk->mem_pfx, chunk->mem_size(),
true);
}
- ut_free(buf_pool->chunks);
- mutex_exit(&buf_pool->mutex);
+ ut_free(chunks);
+ mutex_exit(&mutex);
return true;
}
- buf_pool->curr_size += chunk->size;
- } while (++chunk < buf_pool->chunks + buf_pool->n_chunks);
+ curr_size += chunk->size;
+ } while (++chunk < chunks + n_chunks);
- buf_pool->read_ahead_area =
+ read_ahead_area =
ut_min(BUF_READ_AHEAD_PAGES,
- ut_2_power_up(buf_pool->curr_size /
+ ut_2_power_up(curr_size /
BUF_READ_AHEAD_PORTION));
- buf_pool->curr_pool_size = buf_pool->curr_size
- << srv_page_size_shift;
+ curr_pool_size = curr_size << srv_page_size_shift;
- buf_pool->old_size = buf_pool->curr_size;
- buf_pool->n_chunks_new = buf_pool->n_chunks;
+ old_size = curr_size;
+ n_chunks_new = n_chunks;
/* Number of locks protecting page_hash must be a
power of two */
@@ -1763,86 +1755,73 @@ bool buf_pool_init()
ut_a(srv_n_page_hash_locks != 0);
ut_a(srv_n_page_hash_locks <= MAX_PAGE_HASH_LOCKS);
- buf_pool->page_hash = ib_create(
- 2 * buf_pool->curr_size,
+ page_hash = ib_create(
+ 2 * curr_size,
LATCH_ID_HASH_TABLE_RW_LOCK,
srv_n_page_hash_locks, MEM_HEAP_FOR_PAGE_HASH);
- buf_pool->page_hash_old = NULL;
+ page_hash_old = NULL;
- buf_pool->zip_hash = hash_create(2 * buf_pool->curr_size);
+ zip_hash = hash_create(2 * curr_size);
- buf_pool->last_printout_time = ut_time();
+ last_printout_time = ut_time();
}
/* 2. Initialize flushing fields
-------------------------------- */
- mutex_create(LATCH_ID_FLUSH_LIST, &buf_pool->flush_list_mutex);
+ mutex_create(LATCH_ID_FLUSH_LIST, &flush_list_mutex);
for (i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++) {
- buf_pool->no_flush[i] = os_event_create(0);
+ no_flush[i] = os_event_create(0);
}
- buf_pool->watch = (buf_page_t*) ut_zalloc_nokey(
- sizeof(*buf_pool->watch) * BUF_POOL_WATCH_SIZE);
+ watch = (buf_page_t*) ut_zalloc_nokey(
+ sizeof(*watch) * BUF_POOL_WATCH_SIZE);
/* All fields are initialized by ut_zalloc_nokey(). */
- buf_pool->try_LRU_scan = TRUE;
-
- /* Initialize the hazard pointer for flush_list batches */
- new(&buf_pool->flush_hp) FlushHp(&buf_pool->flush_list_mutex);
-
- /* Initialize the hazard pointer for LRU batches */
- new(&buf_pool->lru_hp) LRUHp(&buf_pool->mutex);
-
- /* Initialize the iterator for LRU scan search */
- new(&buf_pool->lru_scan_itr) LRUItr(&buf_pool->mutex);
-
- /* Initialize the iterator for single page scan search */
- new(&buf_pool->single_scan_itr) LRUItr(&buf_pool->mutex);
+ try_LRU_scan = TRUE;
/* Initialize the temporal memory array and slots */
- buf_pool->tmp_arr = (buf_tmp_array_t *)ut_malloc_nokey(sizeof(buf_tmp_array_t));
- memset(buf_pool->tmp_arr, 0, sizeof(buf_tmp_array_t));
+ tmp_arr = (buf_tmp_array_t *)ut_malloc_nokey(sizeof(buf_tmp_array_t));
+ memset(tmp_arr, 0, sizeof(buf_tmp_array_t));
ulint n_slots = (srv_n_read_io_threads + srv_n_write_io_threads) * (8 * OS_AIO_N_PENDING_IOS_PER_THREAD);
- buf_pool->tmp_arr->n_slots = n_slots;
- buf_pool->tmp_arr->slots = (buf_tmp_buffer_t*)ut_malloc_nokey(sizeof(buf_tmp_buffer_t) * n_slots);
- memset(buf_pool->tmp_arr->slots, 0, (sizeof(buf_tmp_buffer_t) * n_slots));
+ tmp_arr->n_slots = n_slots;
+ tmp_arr->slots = (buf_tmp_buffer_t*)ut_malloc_nokey(sizeof(buf_tmp_buffer_t) * n_slots);
+ memset(tmp_arr->slots, 0, (sizeof(buf_tmp_buffer_t) * n_slots));
/* FIXME: remove some of these variables */
- srv_buf_pool_curr_size = buf_pool->curr_pool_size;
+ srv_buf_pool_curr_size = curr_pool_size;
srv_buf_pool_old_size = srv_buf_pool_size;
srv_buf_pool_base_size = srv_buf_pool_size;
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&mutex);
DBUG_EXECUTE_IF("buf_pool_init_instance_force_oom", return true;);
buf_chunk_map_ref = buf_chunk_map_reg;
buf_LRU_old_ratio_update(100 * 3 / 8, false);
btr_search_sys_create(srv_buf_pool_curr_size / sizeof(void*) / 64);
+ m_initialised = true;
return false;
}
/** Free the buffer pool at shutdown.
This must not be invoked before freeing all mutexes. */
-void buf_pool_free()
+void buf_pool_t::close()
{
- buf_chunk_t* chunk;
- buf_chunk_t* chunks;
- buf_page_t* bpage;
- buf_page_t* prev_bpage = 0;
+ if (!m_initialised) return;
+ m_initialised = false;
- mutex_free(&buf_pool->mutex);
- mutex_free(&buf_pool->zip_mutex);
- mutex_free(&buf_pool->flush_list_mutex);
+ mutex_free(&mutex);
+ mutex_free(&zip_mutex);
+ mutex_free(&flush_list_mutex);
- if (buf_pool->flush_rbt) {
- rbt_free(buf_pool->flush_rbt);
- buf_pool->flush_rbt = NULL;
+ if (flush_rbt) {
+ rbt_free(flush_rbt);
+ flush_rbt = NULL;
}
- for (bpage = UT_LIST_GET_LAST(buf_pool->LRU);
+ for (buf_page_t *bpage = UT_LIST_GET_LAST(LRU), *prev_bpage = 0;
bpage != NULL;
bpage = prev_bpage) {
@@ -1861,13 +1840,11 @@ void buf_pool_free()
}
}
- ut_free(buf_pool->watch);
- buf_pool->watch = NULL;
-
- chunks = buf_pool->chunks;
- chunk = chunks + buf_pool->n_chunks;
+ ut_free(watch);
+ watch = NULL;
- while (--chunk >= chunks) {
+ for (buf_chunk_t* chunk = chunks + n_chunks;
+ --chunk >= chunks; ) {
buf_block_t* block = chunk->blocks;
for (ulint i = chunk->size; i--; block++) {
@@ -1877,23 +1854,23 @@ void buf_pool_free()
ut_d(rw_lock_free(&block->debug_latch));
}
- buf_pool->allocator.deallocate_large(
+ allocator.deallocate_large(
chunk->mem, &chunk->mem_pfx, true);
}
for (ulint i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; ++i) {
- os_event_destroy(buf_pool->no_flush[i]);
+ os_event_destroy(no_flush[i]);
}
- ut_free(buf_pool->chunks);
- ha_clear(buf_pool->page_hash);
- hash_table_free(buf_pool->page_hash);
- hash_table_free(buf_pool->zip_hash);
+ ut_free(chunks);
+ ha_clear(page_hash);
+ hash_table_free(page_hash);
+ hash_table_free(zip_hash);
/* Free all used temporary slots */
- if (buf_pool->tmp_arr) {
- for(ulint i = 0; i < buf_pool->tmp_arr->n_slots; i++) {
- buf_tmp_buffer_t* slot = &(buf_pool->tmp_arr->slots[i]);
+ if (tmp_arr) {
+ for(ulint i = 0; i < tmp_arr->n_slots; i++) {
+ buf_tmp_buffer_t* slot = &(tmp_arr->slots[i]);
if (slot && slot->crypt_buf) {
aligned_free(slot->crypt_buf);
slot->crypt_buf = NULL;
@@ -1905,17 +1882,13 @@ void buf_pool_free()
}
}
- ut_free(buf_pool->tmp_arr->slots);
- ut_free(buf_pool->tmp_arr);
- buf_pool->tmp_arr = NULL;
+ ut_free(tmp_arr->slots);
+ ut_free(tmp_arr);
+ tmp_arr = NULL;
}
- buf_pool->allocator.~ut_allocator();
UT_DELETE(buf_chunk_map_reg);
buf_chunk_map_reg = buf_chunk_map_ref = NULL;
-
- ut_free(buf_pool);
- buf_pool = NULL;
}
/** Reallocate a control block.
@@ -1926,13 +1899,13 @@ static bool buf_page_realloc(buf_block_t* block)
buf_block_t* new_block;
ut_ad(buf_pool_withdrawing);
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
new_block = buf_LRU_get_free_only();
if (new_block == NULL) {
- return(false); /* buf_pool->free list was not enough */
+ return(false); /* buf_pool.free list was not enough */
}
rw_lock_t* hash_lock = buf_page_hash_lock_get(block->page.id);
@@ -1953,16 +1926,16 @@ static bool buf_page_realloc(buf_block_t* block)
buf_LRU_adjust_hp(&block->page);
buf_page_t* prev_b = UT_LIST_GET_PREV(LRU, &block->page);
- UT_LIST_REMOVE(buf_pool->LRU, &block->page);
+ UT_LIST_REMOVE(buf_pool.LRU, &block->page);
if (prev_b != NULL) {
- UT_LIST_INSERT_AFTER(buf_pool->LRU, prev_b, &new_block->page);
+ UT_LIST_INSERT_AFTER(buf_pool.LRU, prev_b, &new_block->page);
} else {
- UT_LIST_ADD_FIRST(buf_pool->LRU, &new_block->page);
+ UT_LIST_ADD_FIRST(buf_pool.LRU, &new_block->page);
}
- if (buf_pool->LRU_old == &block->page) {
- buf_pool->LRU_old = &new_block->page;
+ if (buf_pool.LRU_old == &block->page) {
+ buf_pool.LRU_old = &new_block->page;
}
ut_ad(new_block->page.in_LRU_list);
@@ -1975,30 +1948,30 @@ static bool buf_page_realloc(buf_block_t* block)
page_zip_get_size(&new_block->page.zip));
buf_block_t* prev_block = UT_LIST_GET_PREV(unzip_LRU, block);
- UT_LIST_REMOVE(buf_pool->unzip_LRU, block);
+ UT_LIST_REMOVE(buf_pool.unzip_LRU, block);
ut_d(block->in_unzip_LRU_list = FALSE);
block->page.zip.data = NULL;
page_zip_set_size(&block->page.zip, 0);
if (prev_block != NULL) {
- UT_LIST_INSERT_AFTER(buf_pool->unzip_LRU, prev_block, new_block);
+ UT_LIST_INSERT_AFTER(buf_pool.unzip_LRU, prev_block, new_block);
} else {
- UT_LIST_ADD_FIRST(buf_pool->unzip_LRU, new_block);
+ UT_LIST_ADD_FIRST(buf_pool.unzip_LRU, new_block);
}
} else {
ut_ad(!block->in_unzip_LRU_list);
ut_d(new_block->in_unzip_LRU_list = FALSE);
}
- /* relocate buf_pool->page_hash */
+ /* relocate buf_pool.page_hash */
ut_ad(block->page.in_page_hash);
ut_ad(&block->page == buf_page_hash_get_low(block->page.id));
ut_d(block->page.in_page_hash = FALSE);
ulint fold = block->page.id.fold();
ut_ad(fold == new_block->page.id.fold());
- HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, (&block->page));
- HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, fold, (&new_block->page));
+ HASH_DELETE(buf_page_t, hash, buf_pool.page_hash, fold, (&block->page));
+ HASH_INSERT(buf_page_t, hash, buf_pool.page_hash, fold, (&new_block->page));
ut_ad(new_block->page.in_page_hash);
@@ -2009,7 +1982,7 @@ static bool buf_page_realloc(buf_block_t* block)
buf_block_set_state(block, BUF_BLOCK_REMOVE_HASH);
block->page.id.reset();
- /* Relocate buf_pool->flush_list. */
+ /* Relocate buf_pool.flush_list. */
if (block->page.oldest_modification) {
buf_flush_relocate_on_flush_list(
&block->page, &new_block->page);
@@ -2085,13 +2058,13 @@ buf_resize_status(
@retval true if will be withdrawn */
bool buf_block_will_be_withdrawn(const buf_block_t* block)
{
- ut_ad(buf_pool->curr_size < buf_pool->old_size);
- ut_ad(!buf_pool_resizing || mutex_own(&buf_pool->mutex));
+ ut_ad(buf_pool.curr_size < buf_pool.old_size);
+ ut_ad(!buf_pool_resizing || mutex_own(&buf_pool.mutex));
const buf_chunk_t* chunk
- = buf_pool->chunks + buf_pool->n_chunks_new;
+ = buf_pool.chunks + buf_pool.n_chunks_new;
const buf_chunk_t* echunk
- = buf_pool->chunks + buf_pool->n_chunks;
+ = buf_pool.chunks + buf_pool.n_chunks;
while (chunk < echunk) {
if (block >= chunk->blocks
@@ -2110,13 +2083,13 @@ bool buf_block_will_be_withdrawn(const buf_block_t* block)
bool
buf_frame_will_be_withdrawn(const byte* ptr)
{
- ut_ad(buf_pool->curr_size < buf_pool->old_size);
- ut_ad(!buf_pool_resizing || mutex_own(&buf_pool->mutex));
+ ut_ad(buf_pool.curr_size < buf_pool.old_size);
+ ut_ad(!buf_pool_resizing || mutex_own(&buf_pool.mutex));
const buf_chunk_t* chunk
- = buf_pool->chunks + buf_pool->n_chunks_new;
+ = buf_pool.chunks + buf_pool.n_chunks_new;
const buf_chunk_t* echunk
- = buf_pool->chunks + buf_pool->n_chunks;
+ = buf_pool.chunks + buf_pool.n_chunks;
while (chunk < echunk) {
if (ptr >= chunk->blocks->frame
@@ -2131,7 +2104,7 @@ buf_frame_will_be_withdrawn(const byte* ptr)
}
/** Withdraw the buffer pool blocks from the end of the buffer pool
-until withdrawn by buf_pool->withdraw_target.
+until withdrawn by buf_pool.withdraw_target.
@retval true if retry is needed */
static bool buf_pool_withdraw_blocks()
{
@@ -2139,25 +2112,23 @@ static bool buf_pool_withdraw_blocks()
ulint loop_count = 0;
ib::info() << "start to withdraw the last "
- << buf_pool->withdraw_target << " blocks";
+ << buf_pool.withdraw_target << " blocks";
- /* Minimize buf_pool->zip_free[i] lists */
- mutex_enter(&buf_pool->mutex);
+ /* Minimize buf_pool.zip_free[i] lists */
+ mutex_enter(&buf_pool.mutex);
buf_buddy_condense_free();
- mutex_exit(&buf_pool->mutex);
-
- while (UT_LIST_GET_LEN(buf_pool->withdraw)
- < buf_pool->withdraw_target) {
+ mutex_exit(&buf_pool.mutex);
+ while (UT_LIST_GET_LEN(buf_pool.withdraw) < buf_pool.withdraw_target) {
/* try to withdraw from free_list */
ulint count1 = 0;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
block = reinterpret_cast<buf_block_t*>(
- UT_LIST_GET_FIRST(buf_pool->free));
+ UT_LIST_GET_FIRST(buf_pool.free));
while (block != NULL
- && UT_LIST_GET_LEN(buf_pool->withdraw)
- < buf_pool->withdraw_target) {
+ && UT_LIST_GET_LEN(buf_pool.withdraw)
+ < buf_pool.withdraw_target) {
ut_ad(block->page.in_free_list);
ut_ad(!block->page.in_flush_list);
ut_ad(!block->page.in_LRU_list);
@@ -2171,10 +2142,10 @@ static bool buf_pool_withdraw_blocks()
if (buf_block_will_be_withdrawn(block)) {
/* This should be withdrawn */
UT_LIST_REMOVE(
- buf_pool->free,
+ buf_pool.free,
&block->page);
UT_LIST_ADD_LAST(
- buf_pool->withdraw,
+ buf_pool.withdraw,
&block->page);
ut_d(block->in_withdraw_list = TRUE);
count1++;
@@ -2182,22 +2153,22 @@ static bool buf_pool_withdraw_blocks()
block = next_block;
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* reserve free_list length */
- if (UT_LIST_GET_LEN(buf_pool->withdraw)
- < buf_pool->withdraw_target) {
+ if (UT_LIST_GET_LEN(buf_pool.withdraw)
+ < buf_pool.withdraw_target) {
ulint scan_depth;
flush_counters_t n;
/* cap scan_depth with current LRU size. */
- mutex_enter(&buf_pool->mutex);
- scan_depth = UT_LIST_GET_LEN(buf_pool->LRU);
- mutex_exit(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
+ scan_depth = UT_LIST_GET_LEN(buf_pool.LRU);
+ mutex_exit(&buf_pool.mutex);
scan_depth = ut_min(
- ut_max(buf_pool->withdraw_target
- - UT_LIST_GET_LEN(buf_pool->withdraw),
+ ut_max(buf_pool.withdraw_target
+ - UT_LIST_GET_LEN(buf_pool.withdraw),
static_cast<ulint>(srv_LRU_scan_depth)),
scan_depth);
@@ -2216,9 +2187,9 @@ static bool buf_pool_withdraw_blocks()
/* relocate blocks/buddies in withdrawn area */
ulint count2 = 0;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
buf_page_t* bpage;
- bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
+ bpage = UT_LIST_GET_FIRST(buf_pool.LRU);
while (bpage != NULL) {
BPageMutex* block_mutex;
buf_page_t* next_bpage;
@@ -2273,18 +2244,18 @@ static bool buf_pool_withdraw_blocks()
bpage = next_bpage;
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
buf_resize_status(
"withdrawing blocks. (" ULINTPF "/" ULINTPF ")",
- UT_LIST_GET_LEN(buf_pool->withdraw),
- buf_pool->withdraw_target);
+ UT_LIST_GET_LEN(buf_pool.withdraw),
+ buf_pool.withdraw_target);
ib::info() << "withdrew "
<< count1 << " blocks from free list."
<< " Tried to relocate " << count2 << " pages ("
- << UT_LIST_GET_LEN(buf_pool->withdraw) << "/"
- << buf_pool->withdraw_target << ")";
+ << UT_LIST_GET_LEN(buf_pool.withdraw) << "/"
+ << buf_pool.withdraw_target << ")";
if (++loop_count >= 10) {
/* give up for now.
@@ -2299,9 +2270,9 @@ static bool buf_pool_withdraw_blocks()
/* confirm withdrawn enough */
const buf_chunk_t* chunk
- = buf_pool->chunks + buf_pool->n_chunks_new;
+ = buf_pool.chunks + buf_pool.n_chunks_new;
const buf_chunk_t* echunk
- = buf_pool->chunks + buf_pool->n_chunks;
+ = buf_pool.chunks + buf_pool.n_chunks;
while (chunk < echunk) {
block = chunk->blocks;
@@ -2317,7 +2288,7 @@ static bool buf_pool_withdraw_blocks()
}
ib::info() << "withdrawn target: "
- << UT_LIST_GET_LEN(buf_pool->withdraw) << " blocks";
+ << UT_LIST_GET_LEN(buf_pool.withdraw) << " blocks";
/* retry is not needed */
++buf_withdraw_clock;
@@ -2330,18 +2301,18 @@ static void buf_pool_resize_hash()
{
hash_table_t* new_hash_table;
- ut_ad(buf_pool->page_hash_old == NULL);
+ ut_ad(buf_pool.page_hash_old == NULL);
/* recreate page_hash */
new_hash_table = ib_recreate(
- buf_pool->page_hash, 2 * buf_pool->curr_size);
+ buf_pool.page_hash, 2 * buf_pool.curr_size);
- for (ulint i = 0; i < hash_get_n_cells(buf_pool->page_hash); i++) {
+ for (ulint i = 0; i < hash_get_n_cells(buf_pool.page_hash); i++) {
buf_page_t* bpage;
bpage = static_cast<buf_page_t*>(
HASH_GET_FIRST(
- buf_pool->page_hash, i));
+ buf_pool.page_hash, i));
while (bpage) {
buf_page_t* prev_bpage = bpage;
@@ -2354,7 +2325,7 @@ static void buf_pool_resize_hash()
fold = prev_bpage->id.fold();
HASH_DELETE(buf_page_t, hash,
- buf_pool->page_hash, fold,
+ buf_pool.page_hash, fold,
prev_bpage);
HASH_INSERT(buf_page_t, hash,
@@ -2363,17 +2334,17 @@ static void buf_pool_resize_hash()
}
}
- buf_pool->page_hash_old = buf_pool->page_hash;
- buf_pool->page_hash = new_hash_table;
+ buf_pool.page_hash_old = buf_pool.page_hash;
+ buf_pool.page_hash = new_hash_table;
/* recreate zip_hash */
- new_hash_table = hash_create(2 * buf_pool->curr_size);
+ new_hash_table = hash_create(2 * buf_pool.curr_size);
- for (ulint i = 0; i < hash_get_n_cells(buf_pool->zip_hash); i++) {
+ for (ulint i = 0; i < hash_get_n_cells(buf_pool.zip_hash); i++) {
buf_page_t* bpage;
bpage = static_cast<buf_page_t*>(
- HASH_GET_FIRST(buf_pool->zip_hash, i));
+ HASH_GET_FIRST(buf_pool.zip_hash, i));
while (bpage) {
buf_page_t* prev_bpage = bpage;
@@ -2388,7 +2359,7 @@ static void buf_pool_resize_hash()
prev_bpage));
HASH_DELETE(buf_page_t, hash,
- buf_pool->zip_hash, fold,
+ buf_pool.zip_hash, fold,
prev_bpage);
HASH_INSERT(buf_page_t, hash,
@@ -2397,8 +2368,8 @@ static void buf_pool_resize_hash()
}
}
- hash_table_free(buf_pool->zip_hash);
- buf_pool->zip_hash = new_hash_table;
+ hash_table_free(buf_pool.zip_hash);
+ buf_pool.zip_hash = new_hash_table;
}
#ifndef DBUG_OFF
@@ -2435,19 +2406,19 @@ static void buf_pool_resize()
srv_buf_pool_old_size, srv_buf_pool_size,
srv_buf_pool_chunk_unit);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- ut_ad(buf_pool->curr_size == buf_pool->old_size);
- ut_ad(buf_pool->n_chunks_new == buf_pool->n_chunks);
- ut_ad(UT_LIST_GET_LEN(buf_pool->withdraw) == 0);
- ut_ad(buf_pool->flush_rbt == NULL);
+ ut_ad(buf_pool.curr_size == buf_pool.old_size);
+ ut_ad(buf_pool.n_chunks_new == buf_pool.n_chunks);
+ ut_ad(UT_LIST_GET_LEN(buf_pool.withdraw) == 0);
+ ut_ad(buf_pool.flush_rbt == NULL);
- buf_pool->curr_size = new_instance_size;
+ buf_pool.curr_size = new_instance_size;
- buf_pool->n_chunks_new = (new_instance_size << srv_page_size_shift)
+ buf_pool.n_chunks_new = (new_instance_size << srv_page_size_shift)
/ srv_buf_pool_chunk_unit;
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
#ifdef BTR_CUR_HASH_ADAPT
/* disable AHI if needed */
bool btr_search_disabled = false;
@@ -2469,22 +2440,22 @@ static void buf_pool_resize()
}
#endif /* BTR_CUR_HASH_ADAPT */
- if (buf_pool->curr_size < buf_pool->old_size) {
+ if (buf_pool.curr_size < buf_pool.old_size) {
/* set withdraw target */
ulint withdraw_target = 0;
const buf_chunk_t* chunk
- = buf_pool->chunks + buf_pool->n_chunks_new;
+ = buf_pool.chunks + buf_pool.n_chunks_new;
const buf_chunk_t* echunk
- = buf_pool->chunks + buf_pool->n_chunks;
+ = buf_pool.chunks + buf_pool.n_chunks;
while (chunk < echunk) {
withdraw_target += chunk->size;
++chunk;
}
- ut_ad(buf_pool->withdraw_target == 0);
- buf_pool->withdraw_target = withdraw_target;
+ ut_ad(buf_pool.withdraw_target == 0);
+ buf_pool.withdraw_target = withdraw_target;
buf_pool_withdrawing = true;
}
@@ -2497,7 +2468,7 @@ static void buf_pool_resize()
withdraw_retry:
/* wait for the number of blocks fit to the new size (if needed)*/
bool should_retry_withdraw
- = buf_pool->curr_size < buf_pool->old_size
+ = buf_pool.curr_size < buf_pool.old_size
&& buf_pool_withdraw_blocks();
if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
@@ -2587,8 +2558,8 @@ withdraw_retry:
/* Indicate critical path */
buf_pool_resizing = true;
- mutex_enter(&buf_pool->mutex);
- hash_lock_x_all(buf_pool->page_hash);
+ mutex_enter(&buf_pool.mutex);
+ hash_lock_x_all(buf_pool.page_hash);
buf_chunk_map_reg = UT_NEW_NOKEY(buf_pool_chunk_map_t());
/* add/delete chunks */
@@ -2597,12 +2568,12 @@ withdraw_retry:
buf_resize_status("buffer pool resizing with chunks "
ULINTPF " to " ULINTPF ".",
- buf_pool->n_chunks, buf_pool->n_chunks_new);
+ buf_pool.n_chunks, buf_pool.n_chunks_new);
- if (buf_pool->n_chunks_new < buf_pool->n_chunks) {
+ if (buf_pool.n_chunks_new < buf_pool.n_chunks) {
/* delete chunks */
- chunk = buf_pool->chunks + buf_pool->n_chunks_new;
- echunk = buf_pool->chunks + buf_pool->n_chunks;
+ chunk = buf_pool.chunks + buf_pool.n_chunks_new;
+ echunk = buf_pool.chunks + buf_pool.n_chunks;
ulint sum_freed = 0;
@@ -2617,27 +2588,27 @@ withdraw_retry:
&block->debug_latch));
}
- buf_pool->allocator.deallocate_large(
+ buf_pool.allocator.deallocate_large(
chunk->mem, &chunk->mem_pfx, true);
sum_freed += chunk->size;
++chunk;
}
/* discard withdraw list */
- UT_LIST_INIT(buf_pool->withdraw, &buf_page_t::list);
- buf_pool->withdraw_target = 0;
+ UT_LIST_INIT(buf_pool.withdraw, &buf_page_t::list);
+ buf_pool.withdraw_target = 0;
- ib::info() << buf_pool->n_chunks - buf_pool->n_chunks_new
+ ib::info() << buf_pool.n_chunks - buf_pool.n_chunks_new
<< " chunks (" << sum_freed
<< " blocks) were freed.";
- buf_pool->n_chunks = buf_pool->n_chunks_new;
+ buf_pool.n_chunks = buf_pool.n_chunks_new;
}
{
- /* reallocate buf_pool->chunks */
+ /* reallocate buf_pool.chunks */
const ulint new_chunks_size
- = buf_pool->n_chunks_new * sizeof *chunk;
+ = buf_pool.n_chunks_new * sizeof *chunk;
buf_chunk_t* new_chunks = reinterpret_cast<buf_chunk_t*>(
ut_zalloc_nokey_nofatal(new_chunks_size));
@@ -2648,33 +2619,33 @@ withdraw_retry:
if (!new_chunks) {
ib::error() << "failed to allocate"
" the chunk array.";
- buf_pool->n_chunks_new = buf_pool->n_chunks;
+ buf_pool.n_chunks_new = buf_pool.n_chunks;
warning = true;
- buf_pool->chunks_old = NULL;
+ buf_pool.chunks_old = NULL;
goto calc_buf_pool_size;
}
- ulint n_chunks_copy = ut_min(buf_pool->n_chunks_new,
- buf_pool->n_chunks);
+ ulint n_chunks_copy = ut_min(buf_pool.n_chunks_new,
+ buf_pool.n_chunks);
- memcpy(new_chunks, buf_pool->chunks,
+ memcpy(new_chunks, buf_pool.chunks,
n_chunks_copy * sizeof *chunk);
for (ulint j = 0; j < n_chunks_copy; j++) {
buf_pool_register_chunk(&new_chunks[j]);
}
- buf_pool->chunks_old = buf_pool->chunks;
- buf_pool->chunks = new_chunks;
+ buf_pool.chunks_old = buf_pool.chunks;
+ buf_pool.chunks = new_chunks;
}
- if (buf_pool->n_chunks_new > buf_pool->n_chunks) {
+ if (buf_pool.n_chunks_new > buf_pool.n_chunks) {
/* add chunks */
- chunk = buf_pool->chunks + buf_pool->n_chunks;
- echunk = buf_pool->chunks + buf_pool->n_chunks_new;
+ chunk = buf_pool.chunks + buf_pool.n_chunks;
+ echunk = buf_pool.chunks + buf_pool.n_chunks_new;
ulint sum_added = 0;
- ulint n_chunks = buf_pool->n_chunks;
+ ulint n_chunks = buf_pool.n_chunks;
while (chunk < echunk) {
ulong unit = srv_buf_pool_chunk_unit;
@@ -2684,7 +2655,7 @@ withdraw_retry:
" memory for buffer pool chunk";
warning = true;
- buf_pool->n_chunks_new = n_chunks;
+ buf_pool.n_chunks_new = n_chunks;
break;
}
@@ -2693,40 +2664,40 @@ withdraw_retry:
++chunk;
}
- ib::info() << buf_pool->n_chunks_new - buf_pool->n_chunks
+ ib::info() << buf_pool.n_chunks_new - buf_pool.n_chunks
<< " chunks (" << sum_added
<< " blocks) were added.";
- buf_pool->n_chunks = n_chunks;
+ buf_pool.n_chunks = n_chunks;
}
calc_buf_pool_size:
- /* recalc buf_pool->curr_size */
+ /* recalc buf_pool.curr_size */
ulint new_size = 0;
- chunk = buf_pool->chunks;
+ chunk = buf_pool.chunks;
do {
new_size += chunk->size;
- } while (++chunk < buf_pool->chunks + buf_pool->n_chunks);
+ } while (++chunk < buf_pool.chunks + buf_pool.n_chunks);
- buf_pool->curr_size = new_size;
- buf_pool->n_chunks_new = buf_pool->n_chunks;
+ buf_pool.curr_size = new_size;
+ buf_pool.n_chunks_new = buf_pool.n_chunks;
- if (buf_pool->chunks_old) {
- ut_free(buf_pool->chunks_old);
- buf_pool->chunks_old = NULL;
+ if (buf_pool.chunks_old) {
+ ut_free(buf_pool.chunks_old);
+ buf_pool.chunks_old = NULL;
}
buf_pool_chunk_map_t* chunk_map_old = buf_chunk_map_ref;
buf_chunk_map_ref = buf_chunk_map_reg;
/* set size */
- ut_ad(UT_LIST_GET_LEN(buf_pool->withdraw) == 0);
- buf_pool->read_ahead_area = ut_min(
+ ut_ad(UT_LIST_GET_LEN(buf_pool.withdraw) == 0);
+ buf_pool.read_ahead_area = ut_min(
BUF_READ_AHEAD_PAGES,
- ut_2_power_up(buf_pool->curr_size / BUF_READ_AHEAD_PORTION));
- buf_pool->curr_pool_size = buf_pool->curr_size << srv_page_size_shift;
- srv_buf_pool_curr_size = buf_pool->curr_pool_size;/* FIXME: remove*/
- buf_pool->old_size = buf_pool->curr_size;
+ ut_2_power_up(buf_pool.curr_size / BUF_READ_AHEAD_PORTION));
+ buf_pool.curr_pool_size = buf_pool.curr_size << srv_page_size_shift;
+ srv_buf_pool_curr_size = buf_pool.curr_pool_size;/* FIXME: remove*/
+ buf_pool.old_size = buf_pool.curr_size;
innodb_set_buf_pool_size(buf_pool_size_align(srv_buf_pool_curr_size));
const bool new_size_too_diff
@@ -2741,12 +2712,12 @@ calc_buf_pool_size:
ib::info() << "hash tables were resized";
}
- hash_unlock_x_all(buf_pool->page_hash);
- mutex_exit(&buf_pool->mutex);
+ hash_unlock_x_all(buf_pool.page_hash);
+ mutex_exit(&buf_pool.mutex);
- if (buf_pool->page_hash_old != NULL) {
- hash_table_free(buf_pool->page_hash_old);
- buf_pool->page_hash_old = NULL;
+ if (buf_pool.page_hash_old != NULL) {
+ hash_table_free(buf_pool.page_hash_old);
+ buf_pool.page_hash_old = NULL;
}
UT_DELETE(chunk_map_old);
@@ -2833,9 +2804,9 @@ DECLARE_THREAD(buf_resize_thread)(void*)
break;
}
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
if (srv_buf_pool_old_size == srv_buf_pool_size) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
std::ostringstream sout;
sout << "Size did not change (old size = new size = "
<< srv_buf_pool_size << ". Nothing to do.";
@@ -2844,7 +2815,7 @@ DECLARE_THREAD(buf_resize_thread)(void*)
/* nothing to do */
continue;
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
buf_pool_resize();
}
@@ -2866,8 +2837,8 @@ buf_pool_clear_hash_index()
ut_ad(!buf_pool_resizing);
ut_ad(!btr_search_enabled);
- buf_chunk_t* chunks = buf_pool->chunks;
- buf_chunk_t* chunk = chunks + buf_pool->n_chunks;
+ buf_chunk_t* chunks = buf_pool.chunks;
+ buf_chunk_t* chunk = chunks + buf_pool.n_chunks;
while (--chunk >= chunks) {
buf_block_t* block = chunk->blocks;
@@ -2912,10 +2883,12 @@ buf_pool_clear_hash_index()
}
#endif /* BTR_CUR_HASH_ADAPT */
-/********************************************************************//**
-Relocate a buffer control block. Relocates the block on the LRU list
-and in buf_pool->page_hash. Does not relocate bpage->list.
-The caller must take care of relocating bpage->list. */
+/** Relocate a buffer control block. Relocates the block on the LRU list
+and in buf_pool.page_hash. Does not relocate bpage->list.
+The caller must take care of relocating bpage->list.
+@param[in,out] bpage control block being relocated, buf_page_get_state()
+ must be BUF_BLOCK_ZIP_DIRTY or BUF_BLOCK_ZIP_PAGE
+@param[in,out] dpage destination control block */
static
void
buf_relocate(
@@ -2927,7 +2900,7 @@ buf_relocate(
{
buf_page_t* b;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_page_hash_lock_held_x(bpage));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
@@ -2961,26 +2934,26 @@ buf_relocate(
ut_d(bpage->in_LRU_list = FALSE);
ut_d(bpage->in_page_hash = FALSE);
- /* relocate buf_pool->LRU */
+ /* relocate buf_pool.LRU */
b = UT_LIST_GET_PREV(LRU, bpage);
- UT_LIST_REMOVE(buf_pool->LRU, bpage);
+ UT_LIST_REMOVE(buf_pool.LRU, bpage);
if (b != NULL) {
- UT_LIST_INSERT_AFTER(buf_pool->LRU, b, dpage);
+ UT_LIST_INSERT_AFTER(buf_pool.LRU, b, dpage);
} else {
- UT_LIST_ADD_FIRST(buf_pool->LRU, dpage);
+ UT_LIST_ADD_FIRST(buf_pool.LRU, dpage);
}
- if (UNIV_UNLIKELY(buf_pool->LRU_old == bpage)) {
- buf_pool->LRU_old = dpage;
+ if (UNIV_UNLIKELY(buf_pool.LRU_old == bpage)) {
+ buf_pool.LRU_old = dpage;
#ifdef UNIV_LRU_DEBUG
- /* buf_pool->LRU_old must be the first item in the LRU list
+ /* buf_pool.LRU_old must be the first item in the LRU list
whose "old" flag is set. */
- ut_a(buf_pool->LRU_old->old);
- ut_a(!UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)
- || !UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)->old);
- ut_a(!UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)
- || UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)->old);
+ ut_a(buf_pool.LRU_old->old);
+ ut_a(!UT_LIST_GET_PREV(LRU, buf_pool.LRU_old)
+ || !UT_LIST_GET_PREV(LRU, buf_pool.LRU_old)->old);
+ ut_a(!UT_LIST_GET_NEXT(LRU, buf_pool.LRU_old)
+ || UT_LIST_GET_NEXT(LRU, buf_pool.LRU_old)->old);
} else {
/* Check that the "old" flag is consistent in
the block and its neighbours. */
@@ -2990,11 +2963,11 @@ buf_relocate(
ut_d(CheckInLRUList::validate());
- /* relocate buf_pool->page_hash */
+ /* relocate buf_pool.page_hash */
ulint fold = bpage->id.fold();
ut_ad(fold == dpage->id.fold());
- HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, bpage);
- HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, fold, dpage);
+ HASH_DELETE(buf_page_t, hash, buf_pool.page_hash, fold, bpage);
+ HASH_INSERT(buf_page_t, hash, buf_pool.page_hash, fold, dpage);
}
/** Hazard Pointer implementation. */
@@ -3053,8 +3026,8 @@ bool buf_pool_watch_is_sentinel(const buf_page_t* bpage)
ut_ad(buf_page_hash_lock_held_s_or_x(bpage));
ut_ad(buf_page_in_file(bpage));
- if (bpage < &buf_pool->watch[0]
- || bpage >= &buf_pool->watch[BUF_POOL_WATCH_SIZE]) {
+ if (bpage < &buf_pool.watch[0]
+ || bpage >= &buf_pool.watch[BUF_POOL_WATCH_SIZE]) {
ut_ad(buf_page_get_state(bpage) != BUF_BLOCK_ZIP_PAGE
|| bpage->zip.data != NULL);
@@ -3107,27 +3080,27 @@ page_found:
hash_locks. buf_pool mutex is needed because any changes to
the page_hash must be covered by it and hash_locks are needed
because we don't want to read any stale information in
- buf_pool->watch[]. However, it is not in the critical code path
+ buf_pool.watch[]. However, it is not in the critical code path
as this function will be called only by the purge thread. */
/* To obey latching order first release the hash_lock. */
rw_lock_x_unlock(*hash_lock);
- mutex_enter(&buf_pool->mutex);
- hash_lock_x_all(buf_pool->page_hash);
+ mutex_enter(&buf_pool.mutex);
+ hash_lock_x_all(buf_pool.page_hash);
/* We have to recheck that the page
was not loaded or a watch set by some other
purge thread. This is because of the small
time window between when we release the
- hash_lock to acquire buf_pool->mutex above. */
+	hash_lock and acquire buf_pool.mutex above. */
*hash_lock = buf_page_hash_lock_get(page_id);
bpage = buf_page_hash_get_low(page_id);
if (UNIV_LIKELY_NULL(bpage)) {
- mutex_exit(&buf_pool->mutex);
- hash_unlock_x_all_but(buf_pool->page_hash, *hash_lock);
+ mutex_exit(&buf_pool.mutex);
+ hash_unlock_x_all_but(buf_pool.page_hash, *hash_lock);
goto page_found;
}
@@ -3135,7 +3108,7 @@ page_found:
BUF_POOL_WATCH_SIZE. So there is no way for purge thread
instance to hold a watch when setting another watch. */
for (i = 0; i < BUF_POOL_WATCH_SIZE; i++) {
- bpage = &buf_pool->watch[i];
+ bpage = &buf_pool.watch[i];
ut_ad(bpage->access_time == 0);
ut_ad(bpage->newest_modification == 0);
@@ -3148,24 +3121,24 @@ page_found:
ut_ad(!bpage->in_page_hash);
ut_ad(bpage->buf_fix_count == 0);
- /* bpage is pointing to buf_pool->watch[],
- which is protected by buf_pool->mutex.
+ /* bpage is pointing to buf_pool.watch[],
+ which is protected by buf_pool.mutex.
Normally, buf_page_t objects are protected by
- buf_block_t::mutex or buf_pool->zip_mutex or both. */
+ buf_block_t::mutex or buf_pool.zip_mutex or both. */
bpage->state = BUF_BLOCK_ZIP_PAGE;
bpage->id.copy_from(page_id);
bpage->buf_fix_count = 1;
ut_d(bpage->in_page_hash = TRUE);
- HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
+ HASH_INSERT(buf_page_t, hash, buf_pool.page_hash,
page_id.fold(), bpage);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* Once the sentinel is in the page_hash we can
safely release all locks except just the
relevant hash_lock */
- hash_unlock_x_all_but(buf_pool->page_hash,
+ hash_unlock_x_all_but(buf_pool.page_hash,
*hash_lock);
return(NULL);
@@ -3203,9 +3176,9 @@ buf_pool_watch_remove(buf_page_t* watch)
ut_ad(rw_lock_own(hash_lock, RW_LOCK_X));
#endif /* UNIV_DEBUG */
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
- HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, watch->id.fold(),
+ HASH_DELETE(buf_page_t, hash, buf_pool.page_hash, watch->id.fold(),
watch);
ut_d(watch->in_page_hash = FALSE);
watch->buf_fix_count = 0;
@@ -3220,12 +3193,12 @@ buf_pool_watch_unset(
const page_id_t& page_id)
{
buf_page_t* bpage;
- /* We only need to have buf_pool->mutex in case where we end
+	/* We only need to hold buf_pool.mutex in the case where we end
up calling buf_pool_watch_remove but to obey latching order
we acquire it here before acquiring hash_lock. This should
not cause too much grief as this function is only ever
called from the purge thread. */
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
rw_lock_t* hash_lock = buf_page_hash_lock_get(page_id);
rw_lock_x_lock(hash_lock);
@@ -3239,7 +3212,7 @@ buf_pool_watch_unset(
buf_pool_watch_remove(bpage);
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
rw_lock_x_unlock(hash_lock);
}
@@ -3278,13 +3251,13 @@ the buffer pool.
@param[in,out] bpage buffer block of a file page */
void buf_page_make_young(buf_page_t* bpage)
{
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
ut_a(buf_page_in_file(bpage));
buf_LRU_make_block_young(bpage);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
/********************************************************************//**
@@ -3294,7 +3267,7 @@ slipping out of the buffer pool. The page must be fixed to the buffer pool.
@param[in,out] bpage buffer block of a file page */
static void buf_page_make_young_if_needed(buf_page_t* bpage)
{
- ut_ad(!mutex_own(&buf_pool->mutex));
+ ut_ad(!mutex_own(&buf_pool.mutex));
ut_ad(bpage->buf_fix_count > 0);
ut_a(buf_page_in_file(bpage));
@@ -3369,13 +3342,13 @@ buf_block_try_discard_uncompressed(
{
buf_page_t* bpage;
- /* Since we need to acquire buf_pool mutex to discard
+ /* Since we need to acquire buf_pool.mutex to discard
the uncompressed frame and because page_hash mutex resides
- below buf_pool mutex in sync ordering therefore we must
+ below buf_pool.mutex in sync ordering, we must
first release the page_hash mutex. This means that the
block in question can move out of page_hash. Therefore
we need to check again if the block is still in page_hash. */
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
bpage = buf_page_hash_get(page_id);
@@ -3383,7 +3356,7 @@ buf_block_try_discard_uncompressed(
buf_LRU_free_page(bpage, false);
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
/** Get read access to a compressed page (usually of type
@@ -3407,7 +3380,7 @@ buf_page_get_zip(
ibool discard_attempted = FALSE;
ibool must_read;
- buf_pool->stat.n_page_gets++;
+ buf_pool.stat.n_page_gets++;
for (;;) {
lookup:
@@ -3459,7 +3432,7 @@ err_exit:
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
buf_block_fix(bpage);
- block_mutex = &buf_pool->zip_mutex;
+ block_mutex = &buf_pool.zip_mutex;
mutex_enter(block_mutex);
goto got_block;
case BUF_BLOCK_FILE_PAGE:
@@ -3707,11 +3680,11 @@ the buf_block_t itself or a member of it.
@return whether the ptr belongs to a buf_block_t struct */
bool buf_pointer_is_block_field(const void* ptr)
{
- const buf_chunk_t* chunk = buf_pool->chunks;
+ const buf_chunk_t* chunk = buf_pool.chunks;
const buf_chunk_t* const echunk = chunk + ut_min(
- buf_pool->n_chunks, buf_pool->n_chunks_new);
+ buf_pool.n_chunks, buf_pool.n_chunks_new);
- /* TODO: protect buf_pool->chunks with a mutex (the older pointer will
+ /* TODO: protect buf_pool.chunks with a mutex (the older pointer will
	currently remain during buf_pool_resize()) */
while (chunk < echunk) {
if (ptr >= (void*) chunk->blocks
@@ -3728,7 +3701,7 @@ bool buf_pointer_is_block_field(const void* ptr)
/** Determine if a buffer block was created by buf_chunk_init().
@param[in] block block descriptor (not dereferenced)
-@return whether block has been added to buf_pool->free by buf_chunk_init() */
+@return whether block has been added to buf_pool.free by buf_chunk_init() */
static bool buf_block_is_uncompressed(const buf_block_t* block)
{
/* The pointer should be aligned. */
@@ -3870,7 +3843,7 @@ buf_page_get_gen(
ut_ad(!mtr || !ibuf_inside(mtr)
|| ibuf_page_low(page_id, page_size, FALSE, file, line, NULL));
- buf_pool->stat.n_page_gets++;
+ buf_pool.stat.n_page_gets++;
hash_lock = buf_page_hash_lock_get(page_id);
loop:
block = guess;
@@ -4100,14 +4073,14 @@ got_block:
if (UNIV_UNLIKELY(mode == BUF_EVICT_IF_IN_POOL)) {
evict_from_pool:
ut_ad(!fix_block->page.oldest_modification);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
buf_block_unfix(fix_block);
if (!buf_LRU_free_page(&fix_block->page, true)) {
ut_ad(0);
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(NULL);
}
break;
@@ -4152,7 +4125,7 @@ evict_from_pool:
block = buf_LRU_get_free_block();
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
hash_lock = buf_page_hash_lock_get(page_id);
@@ -4164,22 +4137,22 @@ evict_from_pool:
buf_block_unfix(fix_block);
buf_page_mutex_enter(block);
- mutex_enter(&buf_pool->zip_mutex);
+ mutex_enter(&buf_pool.zip_mutex);
fix_block = block;
if (bpage->buf_fix_count > 0
|| buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
- mutex_exit(&buf_pool->zip_mutex);
+ mutex_exit(&buf_pool.zip_mutex);
/* The block was buffer-fixed or I/O-fixed while
- buf_pool->mutex was not held by this thread.
+ buf_pool.mutex was not held by this thread.
Free the block that was allocated and retry.
This should be extremely unlikely, for example,
if buf_page_get_zip() was invoked. */
buf_LRU_block_free_non_file_page(block);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
rw_lock_x_unlock(hash_lock);
buf_page_mutex_exit(block);
@@ -4208,11 +4181,11 @@ evict_from_pool:
if (buf_page_get_state(&block->page) == BUF_BLOCK_ZIP_PAGE) {
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
- UT_LIST_REMOVE(buf_pool->zip_clean, &block->page);
+ UT_LIST_REMOVE(buf_pool.zip_clean, &block->page);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
ut_ad(!block->page.in_flush_list);
} else {
- /* Relocate buf_pool->flush_list. */
+ /* Relocate buf_pool.flush_list. */
buf_flush_relocate_on_flush_list(bpage, &block->page);
}
@@ -4230,9 +4203,9 @@ evict_from_pool:
UNIV_MEM_INVALID(bpage, sizeof *bpage);
rw_lock_x_unlock(hash_lock);
- buf_pool->n_pend_unzip++;
- mutex_exit(&buf_pool->zip_mutex);
- mutex_exit(&buf_pool->mutex);
+ buf_pool.n_pend_unzip++;
+ mutex_exit(&buf_pool.zip_mutex);
+ mutex_exit(&buf_pool.mutex);
access_time = buf_page_is_accessed(&block->page);
@@ -4241,20 +4214,20 @@ evict_from_pool:
buf_page_free_descriptor(bpage);
/* Decompress the page while not holding
- buf_pool->mutex or block->mutex. */
+ buf_pool.mutex or block->mutex. */
{
bool success = buf_zip_decompress(block, TRUE);
if (!success) {
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
buf_page_mutex_enter(fix_block);
buf_block_set_io_fix(fix_block, BUF_IO_NONE);
buf_page_mutex_exit(fix_block);
- --buf_pool->n_pend_unzip;
+ --buf_pool.n_pend_unzip;
buf_block_unfix(fix_block);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
rw_lock_x_unlock(&fix_block->lock);
*err = DB_PAGE_CORRUPTED;
@@ -4273,7 +4246,7 @@ evict_from_pool:
}
}
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
buf_page_mutex_enter(fix_block);
@@ -4281,9 +4254,9 @@ evict_from_pool:
buf_page_mutex_exit(fix_block);
- --buf_pool->n_pend_unzip;
+ --buf_pool.n_pend_unzip;
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
rw_lock_x_unlock(&block->lock);
@@ -4314,18 +4287,18 @@ evict_from_pool:
/* Try to evict the block from the buffer pool, to use the
insert buffer (change buffer) as much as possible. */
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
buf_block_unfix(fix_block);
- /* Now we are only holding the buf_pool->mutex,
+ /* Now we are only holding the buf_pool.mutex,
not block->mutex or hash_lock. Blocks cannot be
relocated or enter or exit the buf_pool while we
- are holding the buf_pool->mutex. */
+ are holding the buf_pool.mutex. */
if (buf_LRU_free_page(&fix_block->page, true)) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* page_hash can be changed. */
hash_lock = buf_page_hash_lock_get(page_id);
@@ -4377,7 +4350,7 @@ evict_from_pool:
/* Failed to evict the page; change it directly */
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
@@ -4590,7 +4563,7 @@ buf_page_optimistic_get(
ut_a(ibuf_count_get(block->page.id) == 0);
#endif /* UNIV_IBUF_COUNT_DEBUG */
- buf_pool->stat.n_page_gets++;
+ buf_pool.stat.n_page_gets++;
return(TRUE);
}
@@ -4695,7 +4668,7 @@ buf_page_get_known_nowait(
#ifdef UNIV_IBUF_COUNT_DEBUG
ut_a((mode == BUF_KEEP_OLD) || ibuf_count_get(block->page.id) == 0);
#endif
- buf_pool->stat.n_page_gets++;
+ buf_pool.stat.n_page_gets++;
return(TRUE);
}
@@ -4779,7 +4752,7 @@ buf_page_try_get_func(
buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
- buf_pool->stat.n_page_gets++;
+ buf_pool.stat.n_page_gets++;
#ifdef UNIV_IBUF_COUNT_DEBUG
ut_a(ibuf_count_get(block->page.id) == 0);
@@ -4827,7 +4800,7 @@ buf_page_init(
{
buf_page_t* hash_page;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_page_mutex_own(block));
ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE);
ut_ad(rw_lock_own(buf_page_hash_lock_get(page_id), RW_LOCK_X));
@@ -4873,7 +4846,7 @@ buf_page_init(
<< hash_page << ", " << block;
ut_d(buf_page_mutex_exit(block));
- ut_d(mutex_exit(&buf_pool->mutex));
+ ut_d(mutex_exit(&buf_pool.mutex));
ut_d(buf_print());
ut_d(buf_LRU_print());
ut_d(buf_validate());
@@ -4888,7 +4861,7 @@ buf_page_init(
block->page.id.copy_from(page_id);
block->page.size.copy_from(page_size);
- HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
+ HASH_INSERT(buf_page_t, hash, buf_pool.page_hash,
page_id.fold(), &block->page);
if (page_size.is_compressed()) {
@@ -4954,7 +4927,7 @@ buf_page_init_for_read(
ut_ad(block);
}
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
hash_lock = buf_page_hash_lock_get(page_id);
rw_lock_x_lock(hash_lock);
@@ -5004,14 +4977,14 @@ buf_page_init_for_read(
rw_lock_x_lock_gen(&block->lock, BUF_IO_READ);
if (page_size.is_compressed()) {
- /* buf_pool->mutex may be released and
+ /* buf_pool.mutex may be released and
reacquired by buf_buddy_alloc(). Thus, we
must release block->mutex in order not to
break the latching order in the reacquisition
- of buf_pool->mutex. We also must defer this
+ of buf_pool.mutex. We also must defer this
operation until after the block descriptor has
- been added to buf_pool->LRU and
- buf_pool->page_hash. */
+ been added to buf_pool.LRU and
+ buf_pool.page_hash. */
buf_page_mutex_exit(block);
data = buf_buddy_alloc(page_size.physical(), lru);
buf_page_mutex_enter(block);
@@ -5039,7 +5012,7 @@ buf_page_init_for_read(
rw_lock_x_lock(hash_lock);
/* If buf_buddy_alloc() allocated storage from the LRU list,
- it released and reacquired buf_pool->mutex. Thus, we must
+ it released and reacquired buf_pool.mutex. Thus, we must
check the page_hash again, as it may have been modified. */
if (UNIV_UNLIKELY(lru)) {
watch_page = buf_page_hash_get_low(page_id);
@@ -5065,7 +5038,7 @@ buf_page_init_for_read(
bpage->size.copy_from(page_size);
- mutex_enter(&buf_pool->zip_mutex);
+ mutex_enter(&buf_pool.zip_mutex);
UNIV_MEM_DESC(bpage->zip.data, bpage->size.physical());
buf_page_init_low(bpage);
@@ -5097,7 +5070,7 @@ buf_page_init_for_read(
buf_pool_watch_remove(watch_page);
}
- HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
+ HASH_INSERT(buf_page_t, hash, buf_pool.page_hash,
bpage->id.fold(), bpage);
rw_lock_x_unlock(hash_lock);
@@ -5111,12 +5084,12 @@ buf_page_init_for_read(
buf_page_set_io_fix(bpage, BUF_IO_READ);
- mutex_exit(&buf_pool->zip_mutex);
+ mutex_exit(&buf_pool.zip_mutex);
}
- buf_pool->n_pend_reads++;
+ buf_pool.n_pend_reads++;
func_exit:
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if (mode == BUF_READ_IBUF_PAGES_ONLY) {
@@ -5154,7 +5127,7 @@ buf_page_create(
free_block = buf_LRU_get_free_block();
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
hash_lock = buf_page_hash_lock_get(page_id);
rw_lock_x_lock(hash_lock);
@@ -5172,7 +5145,7 @@ buf_page_create(
ut_d(block->page.file_page_was_freed = FALSE);
/* Page can be found in buf_pool */
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
rw_lock_x_unlock(hash_lock);
buf_block_free(free_block);
@@ -5197,26 +5170,26 @@ buf_page_create(
buf_LRU_add_block(&block->page, FALSE);
buf_block_buf_fix_inc(block, __FILE__, __LINE__);
- buf_pool->stat.n_pages_created++;
+ buf_pool.stat.n_pages_created++;
if (page_size.is_compressed()) {
void* data;
bool lru;
/* Prevent race conditions during buf_buddy_alloc(),
- which may release and reacquire buf_pool->mutex,
+ which may release and reacquire buf_pool.mutex,
by IO-fixing and X-latching the block. */
buf_page_set_io_fix(&block->page, BUF_IO_READ);
rw_lock_x_lock(&block->lock);
buf_page_mutex_exit(block);
- /* buf_pool->mutex may be released and reacquired by
+ /* buf_pool.mutex may be released and reacquired by
buf_buddy_alloc(). Thus, we must release block->mutex
in order not to break the latching order in
- the reacquisition of buf_pool->mutex. We also must
+ the reacquisition of buf_pool.mutex. We also must
defer this operation until after the block descriptor
- has been added to buf_pool->LRU and buf_pool->page_hash. */
+ has been added to buf_pool.LRU and buf_pool.page_hash. */
data = buf_buddy_alloc(page_size.physical(), lru);
buf_page_mutex_enter(block);
block->page.zip.data = (page_zip_t*) data;
@@ -5233,7 +5206,7 @@ buf_page_create(
rw_lock_x_unlock(&block->lock);
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
@@ -5386,7 +5359,7 @@ buf_mark_space_corrupt(buf_page_t* bpage, const fil_space_t* space)
== BUF_BLOCK_FILE_PAGE);
/* First unfix and release lock on the bpage */
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
mutex_enter(buf_page_get_mutex(bpage));
ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_READ);
ut_ad(bpage->buf_fix_count == 0);
@@ -5415,10 +5388,10 @@ buf_mark_space_corrupt(buf_page_t* bpage, const fil_space_t* space)
/* After this point bpage can't be referenced. */
buf_LRU_free_one_page(bpage);
- ut_ad(buf_pool->n_pend_reads > 0);
- buf_pool->n_pend_reads--;
+ ut_ad(buf_pool.n_pend_reads > 0);
+ buf_pool.n_pend_reads--;
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
/** Check if page is maybe compressed, encrypted or both when we encounter
@@ -5557,10 +5530,10 @@ buf_page_io_complete(buf_page_t* bpage, bool dblwr, bool evict)
}
if (bpage->zip.data && uncompressed) {
- my_atomic_addlint(&buf_pool->n_pend_unzip, 1);
+ my_atomic_addlint(&buf_pool.n_pend_unzip, 1);
ibool ok = buf_zip_decompress((buf_block_t*) bpage,
FALSE);
- my_atomic_addlint(&buf_pool->n_pend_unzip, ulint(-1));
+ my_atomic_addlint(&buf_pool.n_pend_unzip, ulint(-1));
if (!ok) {
ib::info() << "Page "
@@ -5713,7 +5686,7 @@ database_corrupted:
}
BPageMutex* block_mutex = buf_page_get_mutex(bpage);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
mutex_enter(block_mutex);
#ifdef UNIV_IBUF_COUNT_DEBUG
@@ -5737,9 +5710,9 @@ database_corrupted:
the x-latch to this OS thread: do not let this confuse you in
debugging! */
- ut_ad(buf_pool->n_pend_reads > 0);
- buf_pool->n_pend_reads--;
- buf_pool->stat.n_pages_read++;
+ ut_ad(buf_pool.n_pend_reads > 0);
+ buf_pool.n_pend_reads--;
+ buf_pool.stat.n_pages_read++;
if (uncompressed) {
rw_lock_x_unlock_gen(&((buf_block_t*) bpage)->lock,
@@ -5758,7 +5731,7 @@ database_corrupted:
BUF_IO_WRITE);
}
- buf_pool->stat.n_pages_written++;
+ buf_pool.stat.n_pages_written++;
/* We decide whether or not to evict the page from the
LRU list based on the flush_type.
@@ -5781,7 +5754,7 @@ database_corrupted:
io_type == BUF_IO_READ ? "read" : "wrote",
bpage->id.space(), bpage->id.page_no()));
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return DB_SUCCESS;
}
@@ -5789,10 +5762,10 @@ database_corrupted:
/** Assert that all buffer pool pages are in a replaceable state */
void buf_assert_all_freed()
{
- mutex_enter(&buf_pool->mutex);
- buf_chunk_t* chunk = buf_pool->chunks;
+ mutex_enter(&buf_pool.mutex);
+ buf_chunk_t* chunk = buf_pool.chunks;
- for (ulint i = buf_pool->n_chunks; i--; chunk++) {
+ for (ulint i = buf_pool.n_chunks; i--; chunk++) {
if (const buf_block_t* block = buf_chunk_not_freed(chunk)) {
ib::fatal() << "Page " << block->page.id
@@ -5800,21 +5773,21 @@ void buf_assert_all_freed()
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
/** Refresh the statistics used to print per-second averages. */
void buf_refresh_io_stats()
{
- buf_pool->last_printout_time = ut_time();
- buf_pool->old_stat = buf_pool->stat;
+ buf_pool.last_printout_time = ut_time();
+ buf_pool.old_stat = buf_pool.stat;
}
/** Invalidate all pages in the buffer pool.
All pages must be in a replaceable state (not modified or latched). */
void buf_pool_invalidate()
{
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
for (unsigned i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++) {
@@ -5823,37 +5796,37 @@ void buf_pool_invalidate()
is single threaded (apart from IO helper threads) at
	this stage. No new write batch can be in initialization
stage at this point. */
- ut_ad(!buf_pool->init_flush[i]);
+ ut_ad(!buf_pool.init_flush[i]);
/* However, it is possible that a write batch that has
been posted earlier is still not complete. For buffer
pool invalidation to proceed we must ensure there is NO
write activity happening. */
- if (buf_pool->n_flush[i] > 0) {
+ if (buf_pool.n_flush[i] > 0) {
buf_flush_t type = buf_flush_t(i);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
buf_flush_wait_batch_end(type);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
}
}
- ut_d(mutex_exit(&buf_pool->mutex));
+ ut_d(mutex_exit(&buf_pool.mutex));
ut_d(buf_assert_all_freed());
- ut_d(mutex_enter(&buf_pool->mutex));
+ ut_d(mutex_enter(&buf_pool.mutex));
while (buf_LRU_scan_and_free_block(true));
- ut_ad(UT_LIST_GET_LEN(buf_pool->LRU) == 0);
- ut_ad(UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0);
+ ut_ad(UT_LIST_GET_LEN(buf_pool.LRU) == 0);
+ ut_ad(UT_LIST_GET_LEN(buf_pool.unzip_LRU) == 0);
- buf_pool->freed_page_clock = 0;
- buf_pool->LRU_old = NULL;
- buf_pool->LRU_old_len = 0;
+ buf_pool.freed_page_clock = 0;
+ buf_pool.LRU_old = NULL;
+ buf_pool.LRU_old_len = 0;
- memset(&buf_pool->stat, 0x00, sizeof(buf_pool->stat));
+ memset(&buf_pool.stat, 0x00, sizeof(buf_pool.stat));
buf_refresh_io_stats();
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
@@ -5871,14 +5844,14 @@ void buf_validate()
ulint n_free = 0;
ulint n_zip = 0;
- mutex_enter(&buf_pool->mutex);
- hash_lock_x_all(buf_pool->page_hash);
+ mutex_enter(&buf_pool.mutex);
+ hash_lock_x_all(buf_pool.page_hash);
- chunk = buf_pool->chunks;
+ chunk = buf_pool.chunks;
/* Check the uncompressed blocks. */
- for (i = buf_pool->n_chunks; i--; chunk++) {
+ for (i = buf_pool.n_chunks; i--; chunk++) {
ulint j;
buf_block_t* block = chunk->blocks;
@@ -5958,11 +5931,11 @@ assert_s_latched:
}
}
- mutex_enter(&buf_pool->zip_mutex);
+ mutex_enter(&buf_pool.zip_mutex);
/* Check clean compressed-only blocks. */
- for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b;
+ for (b = UT_LIST_GET_FIRST(buf_pool.zip_clean); b;
b = UT_LIST_GET_NEXT(list, b)) {
ut_ad(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE);
switch (buf_page_get_io_fix(b)) {
@@ -5982,7 +5955,7 @@ assert_s_latched:
}
/* It is OK to read oldest_modification here because
- we have acquired buf_pool->zip_mutex above which acts
+ we have acquired buf_pool.zip_mutex above which acts
as the 'block->mutex' for these bpages. */
ut_ad(!b->oldest_modification);
ut_ad(buf_page_hash_get_low(b->id) == b);
@@ -5992,8 +5965,8 @@ assert_s_latched:
/* Check dirty blocks. */
- mutex_enter(&buf_pool->flush_list_mutex);
- for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
+ mutex_enter(&buf_pool.flush_list_mutex);
+ for (b = UT_LIST_GET_FIRST(buf_pool.flush_list); b;
b = UT_LIST_GET_NEXT(list, b)) {
ut_ad(b->in_flush_list);
ut_ad(b->oldest_modification);
@@ -6040,36 +6013,36 @@ assert_s_latched:
ut_ad(buf_page_hash_get_low(b->id) == b);
}
- ut_ad(UT_LIST_GET_LEN(buf_pool->flush_list) == n_flush);
+ ut_ad(UT_LIST_GET_LEN(buf_pool.flush_list) == n_flush);
- hash_unlock_x_all(buf_pool->page_hash);
- mutex_exit(&buf_pool->flush_list_mutex);
+ hash_unlock_x_all(buf_pool.page_hash);
+ mutex_exit(&buf_pool.flush_list_mutex);
- mutex_exit(&buf_pool->zip_mutex);
+ mutex_exit(&buf_pool.zip_mutex);
- if (buf_pool->curr_size == buf_pool->old_size
- && n_lru + n_free > buf_pool->curr_size + n_zip) {
+ if (buf_pool.curr_size == buf_pool.old_size
+ && n_lru + n_free > buf_pool.curr_size + n_zip) {
ib::fatal() << "n_LRU " << n_lru << ", n_free " << n_free
- << ", pool " << buf_pool->curr_size
+ << ", pool " << buf_pool.curr_size
<< " zip " << n_zip << ". Aborting...";
}
- ut_ad(UT_LIST_GET_LEN(buf_pool->LRU) == n_lru);
+ ut_ad(UT_LIST_GET_LEN(buf_pool.LRU) == n_lru);
- if (buf_pool->curr_size == buf_pool->old_size
- && UT_LIST_GET_LEN(buf_pool->free) != n_free) {
+ if (buf_pool.curr_size == buf_pool.old_size
+ && UT_LIST_GET_LEN(buf_pool.free) != n_free) {
ib::fatal() << "Free list len "
- << UT_LIST_GET_LEN(buf_pool->free)
+ << UT_LIST_GET_LEN(buf_pool.free)
<< ", free blocks " << n_free << ". Aborting...";
}
- ut_ad(buf_pool->n_flush[BUF_FLUSH_LIST] == n_list_flush);
- ut_ad(buf_pool->n_flush[BUF_FLUSH_LRU] == n_lru_flush);
- ut_ad(buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE] == n_page_flush);
+ ut_ad(buf_pool.n_flush[BUF_FLUSH_LIST] == n_list_flush);
+ ut_ad(buf_pool.n_flush[BUF_FLUSH_LRU] == n_lru_flush);
+ ut_ad(buf_pool.n_flush[BUF_FLUSH_SINGLE_PAGE] == n_page_flush);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
ut_d(buf_LRU_validate());
ut_d(buf_flush_validate());
@@ -6090,42 +6063,42 @@ void buf_print()
buf_chunk_t* chunk;
dict_index_t* index;
- size = buf_pool->curr_size;
+ size = buf_pool.curr_size;
index_ids = static_cast<index_id_t*>(
ut_malloc_nokey(size * sizeof *index_ids));
counts = static_cast<ulint*>(ut_malloc_nokey(sizeof(ulint) * size));
- mutex_enter(&buf_pool->mutex);
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
ib::info()
- << "[buffer pool: size=" << buf_pool->curr_size
- << ", database pages=" << UT_LIST_GET_LEN(buf_pool->LRU)
- << ", free pages=" << UT_LIST_GET_LEN(buf_pool->free)
+ << "[buffer pool: size=" << buf_pool.curr_size
+ << ", database pages=" << UT_LIST_GET_LEN(buf_pool.LRU)
+ << ", free pages=" << UT_LIST_GET_LEN(buf_pool.free)
<< ", modified database pages="
- << UT_LIST_GET_LEN(buf_pool->flush_list)
- << ", n pending decompressions=" << buf_pool->n_pend_unzip
- << ", n pending reads=" << buf_pool->n_pend_reads
- << ", n pending flush LRU=" << buf_pool->n_flush[BUF_FLUSH_LRU]
- << " list=" << buf_pool->n_flush[BUF_FLUSH_LIST]
- << " single page=" << buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]
- << ", pages made young=" << buf_pool->stat.n_pages_made_young
- << ", not young=" << buf_pool->stat.n_pages_not_made_young
- << ", pages read=" << buf_pool->stat.n_pages_read
- << ", created=" << buf_pool->stat.n_pages_created
- << ", written=" << buf_pool->stat.n_pages_written << "]";
-
- mutex_exit(&buf_pool->flush_list_mutex);
+ << UT_LIST_GET_LEN(buf_pool.flush_list)
+ << ", n pending decompressions=" << buf_pool.n_pend_unzip
+ << ", n pending reads=" << buf_pool.n_pend_reads
+ << ", n pending flush LRU=" << buf_pool.n_flush[BUF_FLUSH_LRU]
+ << " list=" << buf_pool.n_flush[BUF_FLUSH_LIST]
+ << " single page=" << buf_pool.n_flush[BUF_FLUSH_SINGLE_PAGE]
+ << ", pages made young=" << buf_pool.stat.n_pages_made_young
+ << ", not young=" << buf_pool.stat.n_pages_not_made_young
+ << ", pages read=" << buf_pool.stat.n_pages_read
+ << ", created=" << buf_pool.stat.n_pages_created
+ << ", written=" << buf_pool.stat.n_pages_written << "]";
+
+ mutex_exit(&buf_pool.flush_list_mutex);
/* Count the number of blocks belonging to each index in the buffer */
n_found = 0;
- chunk = buf_pool->chunks;
+ chunk = buf_pool.chunks;
- for (i = buf_pool->n_chunks; i--; chunk++) {
+ for (i = buf_pool.n_chunks; i--; chunk++) {
buf_block_t* block = chunk->blocks;
ulint n_blocks = chunk->size;
@@ -6158,7 +6131,7 @@ void buf_print()
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
for (i = 0; i < n_found; i++) {
index = dict_index_get_if_in_cache(index_ids[i]);
@@ -6191,11 +6164,11 @@ ulint buf_get_latched_pages_number()
buf_chunk_t* chunk;
ulint fixed_pages_number = 0;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- chunk = buf_pool->chunks;
+ chunk = buf_pool.chunks;
- for (i = buf_pool->n_chunks; i--; chunk++) {
+ for (i = buf_pool.n_chunks; i--; chunk++) {
buf_block_t* block;
ulint j;
@@ -6220,11 +6193,11 @@ ulint buf_get_latched_pages_number()
}
}
- mutex_enter(&buf_pool->zip_mutex);
+ mutex_enter(&buf_pool.zip_mutex);
/* Traverse the lists of clean and dirty compressed-only blocks. */
- for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b;
+ for (b = UT_LIST_GET_FIRST(buf_pool.zip_clean); b;
b = UT_LIST_GET_NEXT(list, b)) {
ut_a(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE);
ut_a(buf_page_get_io_fix(b) != BUF_IO_WRITE);
@@ -6235,8 +6208,8 @@ ulint buf_get_latched_pages_number()
}
}
- mutex_enter(&buf_pool->flush_list_mutex);
- for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
+ mutex_enter(&buf_pool.flush_list_mutex);
+ for (b = UT_LIST_GET_FIRST(buf_pool.flush_list); b;
b = UT_LIST_GET_NEXT(list, b)) {
ut_ad(b->in_flush_list);
@@ -6261,9 +6234,9 @@ ulint buf_get_latched_pages_number()
}
}
- mutex_exit(&buf_pool->flush_list_mutex);
- mutex_exit(&buf_pool->zip_mutex);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
+ mutex_exit(&buf_pool.zip_mutex);
+ mutex_exit(&buf_pool.mutex);
return(fixed_pages_number);
}
@@ -6277,108 +6250,108 @@ buf_stats_get_pool_info(buf_pool_info_t* pool_info)
time_t current_time;
double time_elapsed;
- mutex_enter(&buf_pool->mutex);
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
- pool_info->pool_size = buf_pool->curr_size;
+ pool_info->pool_size = buf_pool.curr_size;
- pool_info->lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
+ pool_info->lru_len = UT_LIST_GET_LEN(buf_pool.LRU);
- pool_info->old_lru_len = buf_pool->LRU_old_len;
+ pool_info->old_lru_len = buf_pool.LRU_old_len;
- pool_info->free_list_len = UT_LIST_GET_LEN(buf_pool->free);
+ pool_info->free_list_len = UT_LIST_GET_LEN(buf_pool.free);
- pool_info->flush_list_len = UT_LIST_GET_LEN(buf_pool->flush_list);
+ pool_info->flush_list_len = UT_LIST_GET_LEN(buf_pool.flush_list);
- pool_info->n_pend_unzip = UT_LIST_GET_LEN(buf_pool->unzip_LRU);
+ pool_info->n_pend_unzip = UT_LIST_GET_LEN(buf_pool.unzip_LRU);
- pool_info->n_pend_reads = buf_pool->n_pend_reads;
+ pool_info->n_pend_reads = buf_pool.n_pend_reads;
pool_info->n_pending_flush_lru =
- (buf_pool->n_flush[BUF_FLUSH_LRU]
- + buf_pool->init_flush[BUF_FLUSH_LRU]);
+ (buf_pool.n_flush[BUF_FLUSH_LRU]
+ + buf_pool.init_flush[BUF_FLUSH_LRU]);
pool_info->n_pending_flush_list =
- (buf_pool->n_flush[BUF_FLUSH_LIST]
- + buf_pool->init_flush[BUF_FLUSH_LIST]);
+ (buf_pool.n_flush[BUF_FLUSH_LIST]
+ + buf_pool.init_flush[BUF_FLUSH_LIST]);
pool_info->n_pending_flush_single_page =
- (buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]
- + buf_pool->init_flush[BUF_FLUSH_SINGLE_PAGE]);
+ (buf_pool.n_flush[BUF_FLUSH_SINGLE_PAGE]
+ + buf_pool.init_flush[BUF_FLUSH_SINGLE_PAGE]);
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
current_time = time(NULL);
time_elapsed = 0.001 + difftime(current_time,
- buf_pool->last_printout_time);
+ buf_pool.last_printout_time);
- pool_info->n_pages_made_young = buf_pool->stat.n_pages_made_young;
+ pool_info->n_pages_made_young = buf_pool.stat.n_pages_made_young;
pool_info->n_pages_not_made_young =
- buf_pool->stat.n_pages_not_made_young;
+ buf_pool.stat.n_pages_not_made_young;
- pool_info->n_pages_read = buf_pool->stat.n_pages_read;
+ pool_info->n_pages_read = buf_pool.stat.n_pages_read;
- pool_info->n_pages_created = buf_pool->stat.n_pages_created;
+ pool_info->n_pages_created = buf_pool.stat.n_pages_created;
- pool_info->n_pages_written = buf_pool->stat.n_pages_written;
+ pool_info->n_pages_written = buf_pool.stat.n_pages_written;
- pool_info->n_page_gets = buf_pool->stat.n_page_gets;
+ pool_info->n_page_gets = buf_pool.stat.n_page_gets;
- pool_info->n_ra_pages_read_rnd = buf_pool->stat.n_ra_pages_read_rnd;
- pool_info->n_ra_pages_read = buf_pool->stat.n_ra_pages_read;
+ pool_info->n_ra_pages_read_rnd = buf_pool.stat.n_ra_pages_read_rnd;
+ pool_info->n_ra_pages_read = buf_pool.stat.n_ra_pages_read;
- pool_info->n_ra_pages_evicted = buf_pool->stat.n_ra_pages_evicted;
+ pool_info->n_ra_pages_evicted = buf_pool.stat.n_ra_pages_evicted;
pool_info->page_made_young_rate =
- (buf_pool->stat.n_pages_made_young
- - buf_pool->old_stat.n_pages_made_young) / time_elapsed;
+ (buf_pool.stat.n_pages_made_young
+ - buf_pool.old_stat.n_pages_made_young) / time_elapsed;
pool_info->page_not_made_young_rate =
- (buf_pool->stat.n_pages_not_made_young
- - buf_pool->old_stat.n_pages_not_made_young) / time_elapsed;
+ (buf_pool.stat.n_pages_not_made_young
+ - buf_pool.old_stat.n_pages_not_made_young) / time_elapsed;
pool_info->pages_read_rate =
- (buf_pool->stat.n_pages_read
- - buf_pool->old_stat.n_pages_read) / time_elapsed;
+ (buf_pool.stat.n_pages_read
+ - buf_pool.old_stat.n_pages_read) / time_elapsed;
pool_info->pages_created_rate =
- (buf_pool->stat.n_pages_created
- - buf_pool->old_stat.n_pages_created) / time_elapsed;
+ (buf_pool.stat.n_pages_created
+ - buf_pool.old_stat.n_pages_created) / time_elapsed;
pool_info->pages_written_rate =
- (buf_pool->stat.n_pages_written
- - buf_pool->old_stat.n_pages_written) / time_elapsed;
+ (buf_pool.stat.n_pages_written
+ - buf_pool.old_stat.n_pages_written) / time_elapsed;
- pool_info->n_page_get_delta = buf_pool->stat.n_page_gets
- - buf_pool->old_stat.n_page_gets;
+ pool_info->n_page_get_delta = buf_pool.stat.n_page_gets
+ - buf_pool.old_stat.n_page_gets;
if (pool_info->n_page_get_delta) {
- pool_info->page_read_delta = buf_pool->stat.n_pages_read
- - buf_pool->old_stat.n_pages_read;
+ pool_info->page_read_delta = buf_pool.stat.n_pages_read
+ - buf_pool.old_stat.n_pages_read;
pool_info->young_making_delta =
- buf_pool->stat.n_pages_made_young
- - buf_pool->old_stat.n_pages_made_young;
+ buf_pool.stat.n_pages_made_young
+ - buf_pool.old_stat.n_pages_made_young;
pool_info->not_young_making_delta =
- buf_pool->stat.n_pages_not_made_young
- - buf_pool->old_stat.n_pages_not_made_young;
+ buf_pool.stat.n_pages_not_made_young
+ - buf_pool.old_stat.n_pages_not_made_young;
}
pool_info->pages_readahead_rnd_rate =
- (buf_pool->stat.n_ra_pages_read_rnd
- - buf_pool->old_stat.n_ra_pages_read_rnd) / time_elapsed;
+ (buf_pool.stat.n_ra_pages_read_rnd
+ - buf_pool.old_stat.n_ra_pages_read_rnd) / time_elapsed;
pool_info->pages_readahead_rate =
- (buf_pool->stat.n_ra_pages_read
- - buf_pool->old_stat.n_ra_pages_read) / time_elapsed;
+ (buf_pool.stat.n_ra_pages_read
+ - buf_pool.old_stat.n_ra_pages_read) / time_elapsed;
pool_info->pages_evicted_rate =
- (buf_pool->stat.n_ra_pages_evicted
- - buf_pool->old_stat.n_ra_pages_evicted) / time_elapsed;
+ (buf_pool.stat.n_ra_pages_evicted
+ - buf_pool.old_stat.n_ra_pages_evicted) / time_elapsed;
- pool_info->unzip_lru_len = UT_LIST_GET_LEN(buf_pool->unzip_LRU);
+ pool_info->unzip_lru_len = UT_LIST_GET_LEN(buf_pool.unzip_LRU);
pool_info->io_sum = buf_LRU_stat_sum.io;
@@ -6389,7 +6362,7 @@ buf_stats_get_pool_info(buf_pool_info_t* pool_info)
pool_info->unzip_cur = buf_LRU_stat_cur.unzip;
buf_refresh_io_stats();
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
/*********************************************************************//**
@@ -6504,13 +6477,13 @@ buf_print_io(
ulint buf_pool_check_no_pending_io()
{
/* FIXME: use atomics, no mutex */
- ulint pending_io = buf_pool->n_pend_reads;
- mutex_enter(&buf_pool->mutex);
+ ulint pending_io = buf_pool.n_pend_reads;
+ mutex_enter(&buf_pool.mutex);
pending_io +=
- + buf_pool->n_flush[BUF_FLUSH_LRU]
- + buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]
- + buf_pool->n_flush[BUF_FLUSH_LIST];
- mutex_exit(&buf_pool->mutex);
+ + buf_pool.n_flush[BUF_FLUSH_LRU]
+ + buf_pool.n_flush[BUF_FLUSH_SINGLE_PAGE]
+ + buf_pool.n_flush[BUF_FLUSH_LIST];
+ mutex_exit(&buf_pool.mutex);
return(pending_io);
}
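Every buf0buf.cc hunk above follows one mechanical pattern: an access through the buf_pool pointer becomes a direct member access on a single buf_pool instance that is no longer reached through a pointer, so mutex_enter(&buf_pool->mutex) turns into mutex_enter(&buf_pool.mutex) and UT_LIST_GET_LEN(buf_pool->LRU) into UT_LIST_GET_LEN(buf_pool.LRU). A minimal, hypothetical C++ sketch of the access pattern (simplified stand-in types, not the real InnoDB declarations; the static storage below is assumed from the '.' accesses, and one practical effect is that each access no longer has to load the buf_pool pointer first):

	// Hypothetical sketch, not the real buf_pool_t: the stand-in members only
	// illustrate accessing one instance with '.' instead of '->'.
	#include <mutex>

	struct buf_pool_sketch {
		std::mutex	mutex;			// stands in for buf_pool.mutex
		unsigned long	n_pend_reads = 0;	// stands in for buf_pool.n_pend_reads
	};

	static buf_pool_sketch	buf_pool;	// stand-in: a single instance, assumed static here

	int main()
	{
		// before: mutex_enter(&buf_pool->mutex);   pool reached through a pointer
		// after:  mutex_enter(&buf_pool.mutex);    pool accessed as a plain object
		std::lock_guard<std::mutex> guard(buf_pool.mutex);
		++buf_pool.n_pend_reads;
		return 0;
	}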
diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc
index 8c473b0c658..60d0e5264a9 100644
--- a/storage/innobase/buf/buf0dblwr.cc
+++ b/storage/innobase/buf/buf0dblwr.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2017, MariaDB Corporation.
+Copyright (c) 2013, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
diff --git a/storage/innobase/buf/buf0dump.cc b/storage/innobase/buf/buf0dump.cc
index 5a47b3eee7d..b4905ab266f 100644
--- a/storage/innobase/buf/buf0dump.cc
+++ b/storage/innobase/buf/buf0dump.cc
@@ -300,13 +300,13 @@ buf_dump(
ulint n_pages;
ulint j;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- n_pages = UT_LIST_GET_LEN(buf_pool->LRU);
+ n_pages = UT_LIST_GET_LEN(buf_pool.LRU);
/* skip empty buffer pools */
if (n_pages == 0) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
goto done;
}
@@ -315,7 +315,7 @@ buf_dump(
/* limit the number of total pages dumped to X% of the
total number of pages */
- t_pages = buf_pool->curr_size * srv_buf_pool_dump_pct / 100;
+ t_pages = buf_pool.curr_size * srv_buf_pool_dump_pct / 100;
if (n_pages > t_pages) {
buf_dump_status(STATUS_INFO,
"Restricted to " ULINTPF
@@ -334,7 +334,7 @@ buf_dump(
n_pages * sizeof(*dump)));
if (dump == NULL) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
fclose(f);
buf_dump_status(STATUS_ERR,
"Cannot allocate " ULINTPF " bytes: %s",
@@ -344,7 +344,7 @@ buf_dump(
return;
}
- for (bpage = UT_LIST_GET_FIRST(buf_pool->LRU), j = 0;
+ for (bpage = UT_LIST_GET_FIRST(buf_pool.LRU), j = 0;
bpage != NULL && j < n_pages;
bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
@@ -359,7 +359,7 @@ buf_dump(
bpage->id.page_no());
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
ut_a(j <= n_pages);
n_pages = j;
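The buf0dump.cc change is the same member-access conversion; the dump logic itself is untouched: take buf_pool.mutex, read the LRU length, cap the number of dumped pages at srv_buf_pool_dump_pct per cent of buf_pool.curr_size, then walk buf_pool.LRU from its head recording page identifiers. A hedged sketch of that shape, with standard containers standing in for UT_LIST and the real descriptors (all names here are illustrative only):

	// Hypothetical sketch: std::list/std::vector stand in for UT_LIST, and the
	// globals stand in for buf_pool members and srv_buf_pool_dump_pct.
	#include <algorithm>
	#include <cstdint>
	#include <list>
	#include <mutex>
	#include <vector>

	struct page_id_sketch { uint32_t space; uint32_t page_no; };

	static std::mutex			pool_mutex;		// buf_pool.mutex
	static std::list<page_id_sketch>	lru;			// buf_pool.LRU
	static size_t				curr_size = 1024;	// buf_pool.curr_size
	static unsigned				dump_pct = 25;		// srv_buf_pool_dump_pct

	// Collect at most dump_pct% of the pool's pages, walking from the list head.
	static std::vector<page_id_sketch> collect_dump()
	{
		std::lock_guard<std::mutex> guard(pool_mutex);
		const size_t n_pages = std::min(lru.size(), curr_size * dump_pct / 100);
		std::vector<page_id_sketch> dump;
		dump.reserve(n_pages);
		for (auto it = lru.begin(); it != lru.end() && dump.size() < n_pages; ++it)
			dump.push_back(*it);
		return dump;
	}

	int main()
	{
		lru.push_back({0, 3});		// pretend one page is cached
		return collect_dump().size() == 1 ? 0 : 1;
	}

As in the real function, the cap keeps a very large pool from producing an equally large dump.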
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index 93aa7f1ae99..1fbfda550df 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -192,9 +192,9 @@ in thrashing. */
static inline void incr_flush_list_size_in_bytes(const buf_block_t* block)
{
/* FIXME: use atomics! */
- ut_ad(mutex_own(&buf_pool->flush_list_mutex));
- buf_pool->stat.flush_list_bytes += block->page.size.physical();
- ut_ad(buf_pool->stat.flush_list_bytes <= buf_pool->curr_pool_size);
+ ut_ad(mutex_own(&buf_pool.flush_list_mutex));
+ buf_pool.stat.flush_list_bytes += block->page.size.physical();
+ ut_ad(buf_pool.stat.flush_list_bytes <= buf_pool.curr_pool_size);
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
@@ -240,14 +240,14 @@ buf_flush_insert_in_flush_rbt(
buf_page_t* prev = NULL;
ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE);
- ut_ad(mutex_own(&buf_pool->flush_list_mutex));
+ ut_ad(mutex_own(&buf_pool.flush_list_mutex));
/* Insert this buffer into the rbt. */
- c_node = rbt_insert(buf_pool->flush_rbt, &bpage, &bpage);
+ c_node = rbt_insert(buf_pool.flush_rbt, &bpage, &bpage);
ut_a(c_node != NULL);
/* Get the predecessor. */
- p_node = rbt_prev(buf_pool->flush_rbt, c_node);
+ p_node = rbt_prev(buf_pool.flush_rbt, c_node);
if (p_node != NULL) {
buf_page_t** value;
@@ -267,12 +267,12 @@ buf_flush_delete_from_flush_rbt(
/*============================*/
buf_page_t* bpage) /*!< in: bpage to be removed. */
{
- ut_ad(mutex_own(&buf_pool->flush_list_mutex));
+ ut_ad(mutex_own(&buf_pool.flush_list_mutex));
#ifdef UNIV_DEBUG
ibool ret =
#endif /* UNIV_DEBUG */
- rbt_delete(buf_pool->flush_rbt, &bpage);
+ rbt_delete(buf_pool.flush_rbt, &bpage);
ut_ad(ret);
}
@@ -282,7 +282,7 @@ Compare two modified blocks in the buffer pool. The key for comparison
is:
key = <oldest_modification, space, offset>
	This comparison is used to maintain ordering of blocks in the
-buf_pool->flush_rbt.
+buf_pool.flush_rbt.
Note that for the purpose of flush_rbt, we only need to order blocks
on the oldest_modification. The other two fields are used to uniquely
identify the blocks.
@@ -301,7 +301,7 @@ buf_flush_block_cmp(
ut_ad(b1 != NULL);
ut_ad(b2 != NULL);
- ut_ad(mutex_own(&buf_pool->flush_list_mutex));
+ ut_ad(mutex_own(&buf_pool.flush_list_mutex));
ut_ad(b1->in_flush_list);
ut_ad(b2->in_flush_list);
@@ -327,12 +327,12 @@ void
buf_flush_init_flush_rbt(void)
/*==========================*/
{
- mutex_enter(&buf_pool->flush_list_mutex);
- ut_ad(buf_pool->flush_rbt == NULL);
+ mutex_enter(&buf_pool.flush_list_mutex);
+ ut_ad(buf_pool.flush_rbt == NULL);
/* Create red black tree for speedy insertions in flush list. */
- buf_pool->flush_rbt = rbt_create(
+ buf_pool.flush_rbt = rbt_create(
sizeof(buf_page_t*), buf_flush_block_cmp);
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
}
/********************************************************************//**
@@ -341,13 +341,13 @@ void
buf_flush_free_flush_rbt(void)
/*==========================*/
{
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_flush_validate_low();
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
- rbt_free(buf_pool->flush_rbt);
- buf_pool->flush_rbt = NULL;
- mutex_exit(&buf_pool->flush_list_mutex);
+ rbt_free(buf_pool.flush_rbt);
+ buf_pool.flush_rbt = NULL;
+ mutex_exit(&buf_pool.flush_list_mutex);
}
/** Insert a modified block into the flush list.
@@ -355,21 +355,21 @@ buf_flush_free_flush_rbt(void)
@param[in] lsn oldest modification */
void buf_flush_insert_into_flush_list(buf_block_t* block, lsn_t lsn)
{
- ut_ad(!mutex_own(&buf_pool->mutex));
+ ut_ad(!mutex_own(&buf_pool.mutex));
ut_ad(log_flush_order_mutex_own());
ut_ad(buf_page_mutex_own(block));
ut_ad(lsn);
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
- ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL)
- || (UT_LIST_GET_FIRST(buf_pool->flush_list)->oldest_modification
+ ut_ad((UT_LIST_GET_FIRST(buf_pool.flush_list) == NULL)
+ || (UT_LIST_GET_FIRST(buf_pool.flush_list)->oldest_modification
<= lsn));
/* If we are in the recovery then we need to update the flush
red-black tree as well. */
- if (buf_pool->flush_rbt != NULL) {
- mutex_exit(&buf_pool->flush_list_mutex);
+ if (buf_pool.flush_rbt != NULL) {
+ mutex_exit(&buf_pool.flush_list_mutex);
buf_flush_insert_sorted_into_flush_list(block, lsn);
return;
}
@@ -380,7 +380,7 @@ void buf_flush_insert_into_flush_list(buf_block_t* block, lsn_t lsn)
ut_d(block->page.in_flush_list = TRUE);
block->page.oldest_modification = lsn;
- UT_LIST_ADD_FIRST(buf_pool->flush_list, &block->page);
+ UT_LIST_ADD_FIRST(buf_pool.flush_list, &block->page);
incr_flush_list_size_in_bytes(block);
@@ -400,7 +400,7 @@ void buf_flush_insert_into_flush_list(buf_block_t* block, lsn_t lsn)
buf_flush_validate_skip();
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
}
/********************************************************************//**
@@ -417,14 +417,14 @@ buf_flush_insert_sorted_into_flush_list(
buf_page_t* b;
ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE);
- ut_ad(!mutex_own(&buf_pool->mutex));
+ ut_ad(!mutex_own(&buf_pool.mutex));
ut_ad(log_flush_order_mutex_own());
ut_ad(buf_page_mutex_own(block));
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
- /* The field in_LRU_list is protected by buf_pool->mutex, which
+ /* The field in_LRU_list is protected by buf_pool.mutex, which
we are not holding. However, while a block is in the flush
list, it is dirty and cannot be discarded, not from the
page_hash or from the LRU list. At most, the uncompressed
@@ -464,13 +464,13 @@ buf_flush_insert_sorted_into_flush_list(
before the last page was hooked up in the flush_list by the
io-handler thread. In that case we'll just do a simple
linear search in the else block. */
- if (buf_pool->flush_rbt != NULL) {
+ if (buf_pool.flush_rbt != NULL) {
prev_b = buf_flush_insert_in_flush_rbt(&block->page);
} else {
- b = UT_LIST_GET_FIRST(buf_pool->flush_list);
+ b = UT_LIST_GET_FIRST(buf_pool.flush_list);
while (b != NULL && b->oldest_modification
> block->page.oldest_modification) {
@@ -482,9 +482,9 @@ buf_flush_insert_sorted_into_flush_list(
}
if (prev_b == NULL) {
- UT_LIST_ADD_FIRST(buf_pool->flush_list, &block->page);
+ UT_LIST_ADD_FIRST(buf_pool.flush_list, &block->page);
} else {
- UT_LIST_INSERT_AFTER(buf_pool->flush_list, prev_b, &block->page);
+ UT_LIST_INSERT_AFTER(buf_pool.flush_list, prev_b, &block->page);
}
incr_flush_list_size_in_bytes(block);
@@ -493,7 +493,7 @@ buf_flush_insert_sorted_into_flush_list(
buf_flush_validate_low();
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
}
/********************************************************************//**
@@ -506,7 +506,7 @@ buf_flush_ready_for_replace(
buf_page_t* bpage) /*!< in: buffer control block, must be
buf_page_in_file(bpage) and in the LRU list */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(bpage->in_LRU_list);
@@ -533,7 +533,7 @@ buf_flush_ready_for_flush(
buf_page_in_file(bpage) */
buf_flush_t flush_type)/*!< in: type of flush */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_a(buf_page_in_file(bpage));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(flush_type < BUF_FLUSH_N_TYPES);
@@ -569,18 +569,18 @@ void buf_flush_remove(buf_page_t* bpage)
INNODB_EXTEND_TIMEOUT_INTERVAL,
"Flush and remove page with tablespace id %u"
", flush list length " ULINTPF,
- bpage->space, UT_LIST_GET_LEN(buf_pool->flush_list));
+ bpage->space, UT_LIST_GET_LEN(buf_pool.flush_list));
}
#endif
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(bpage->in_flush_list);
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
/* Important that we adjust the hazard pointer before removing
the bpage from flush list. */
- buf_pool->flush_hp.adjust(bpage);
+ buf_pool.flush_hp.adjust(bpage);
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_POOL_WATCH:
@@ -594,18 +594,18 @@ void buf_flush_remove(buf_page_t* bpage)
return;
case BUF_BLOCK_ZIP_DIRTY:
buf_page_set_state(bpage, BUF_BLOCK_ZIP_PAGE);
- UT_LIST_REMOVE(buf_pool->flush_list, bpage);
+ UT_LIST_REMOVE(buf_pool.flush_list, bpage);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_LRU_insert_zip_clean(bpage);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
break;
case BUF_BLOCK_FILE_PAGE:
- UT_LIST_REMOVE(buf_pool->flush_list, bpage);
+ UT_LIST_REMOVE(buf_pool.flush_list, bpage);
break;
}
/* If the flush_rbt is active then delete from there as well. */
- if (buf_pool->flush_rbt != NULL) {
+ if (buf_pool.flush_rbt != NULL) {
buf_flush_delete_from_flush_rbt(bpage);
}
@@ -613,7 +613,7 @@ void buf_flush_remove(buf_page_t* bpage)
because we assert on in_flush_list in comparison function. */
ut_d(bpage->in_flush_list = FALSE);
- buf_pool->stat.flush_list_bytes -= bpage->size.physical();
+ buf_pool.stat.flush_list_bytes -= bpage->size.physical();
bpage->oldest_modification = 0;
@@ -628,7 +628,7 @@ void buf_flush_remove(buf_page_t* bpage)
bpage->flush_observer = NULL;
}
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
}
/*******************************************************************//**
@@ -651,10 +651,10 @@ buf_flush_relocate_on_flush_list(
buf_page_t* prev;
buf_page_t* prev_b = NULL;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
/* FIXME: At this point we have both buf_pool and flush_list
mutexes. Theoretically removal of a block from flush list is
@@ -668,38 +668,38 @@ buf_flush_relocate_on_flush_list(
/* If recovery is active we must swap the control blocks in
the flush_rbt as well. */
- if (buf_pool->flush_rbt != NULL) {
+ if (buf_pool.flush_rbt != NULL) {
buf_flush_delete_from_flush_rbt(bpage);
prev_b = buf_flush_insert_in_flush_rbt(dpage);
}
/* Important that we adjust the hazard pointer before removing
the bpage from the flush list. */
- buf_pool->flush_hp.adjust(bpage);
+ buf_pool.flush_hp.adjust(bpage);
/* Must be done after we have removed it from the flush_rbt
because we assert on in_flush_list in comparison function. */
ut_d(bpage->in_flush_list = FALSE);
prev = UT_LIST_GET_PREV(list, bpage);
- UT_LIST_REMOVE(buf_pool->flush_list, bpage);
+ UT_LIST_REMOVE(buf_pool.flush_list, bpage);
if (prev) {
ut_ad(prev->in_flush_list);
- UT_LIST_INSERT_AFTER( buf_pool->flush_list, prev, dpage);
+		UT_LIST_INSERT_AFTER(buf_pool.flush_list, prev, dpage);
} else {
- UT_LIST_ADD_FIRST(buf_pool->flush_list, dpage);
+ UT_LIST_ADD_FIRST(buf_pool.flush_list, dpage);
}
/* Just an extra check. Previous in flush_list
should be the same control block as in flush_rbt. */
- ut_a(buf_pool->flush_rbt == NULL || prev_b == prev);
+ ut_a(buf_pool.flush_rbt == NULL || prev_b == prev);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_flush_validate_low();
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
}
/** Update the flush system data structures when a write is completed.
@@ -712,17 +712,17 @@ void buf_flush_write_complete(buf_page_t* bpage, bool dblwr)
buf_flush_remove(bpage);
const buf_flush_t flush_type = buf_page_get_flush_type(bpage);
- buf_pool->n_flush[flush_type]--;
- ut_ad(buf_pool->n_flush[flush_type] != ULINT_MAX);
+ buf_pool.n_flush[flush_type]--;
+ ut_ad(buf_pool.n_flush[flush_type] != ULINT_MAX);
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
- if (buf_pool->n_flush[flush_type] == 0
- && buf_pool->init_flush[flush_type] == FALSE) {
+ if (buf_pool.n_flush[flush_type] == 0
+ && buf_pool.init_flush[flush_type] == FALSE) {
/* The running flush batch has ended */
- os_event_set(buf_pool->no_flush[flush_type]);
+ os_event_set(buf_pool.no_flush[flush_type]);
}
if (dblwr) {
@@ -941,13 +941,13 @@ buf_flush_write_block_low(
ut_ad(buf_page_in_file(bpage));
- /* We are not holding buf_pool->mutex or block_mutex here.
+ /* We are not holding buf_pool.mutex or block_mutex here.
Nevertheless, it is safe to access bpage, because it is
io_fixed and oldest_modification != 0. Thus, it cannot be
relocated in the buffer pool or removed from flush_list or
LRU_list. */
- ut_ad(!mutex_own(&buf_pool->mutex));
- ut_ad(!mutex_own(&buf_pool->flush_list_mutex));
+ ut_ad(!mutex_own(&buf_pool.mutex));
+ ut_ad(!mutex_own(&buf_pool.flush_list_mutex));
ut_ad(!buf_page_get_mutex(bpage)->is_owned());
ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_WRITE);
ut_ad(bpage->oldest_modification != 0);
@@ -1065,7 +1065,7 @@ bool buf_flush_page(buf_page_t* bpage, buf_flush_t flush_type, bool sync)
BPageMutex* block_mutex;
ut_ad(flush_type < BUF_FLUSH_N_TYPES);
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_page_in_file(bpage));
ut_ad(!sync || flush_type == BUF_FLUSH_SINGLE_PAGE);
@@ -1077,7 +1077,7 @@ bool buf_flush_page(buf_page_t* bpage, buf_flush_t flush_type, bool sync)
bool is_uncompressed;
is_uncompressed = (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE);
- ut_ad(is_uncompressed == (block_mutex != &buf_pool->zip_mutex));
+ ut_ad(is_uncompressed == (block_mutex != &buf_pool.zip_mutex));
ibool flush;
rw_lock_t* rw_lock;
@@ -1113,16 +1113,15 @@ bool buf_flush_page(buf_page_t* bpage, buf_flush_t flush_type, bool sync)
buf_page_set_flush_type(bpage, flush_type);
- if (buf_pool->n_flush[flush_type] == 0) {
- os_event_reset(buf_pool->no_flush[flush_type]);
+ if (buf_pool.n_flush[flush_type] == 0) {
+ os_event_reset(buf_pool.no_flush[flush_type]);
}
- ++buf_pool->n_flush[flush_type];
- ut_ad(buf_pool->n_flush[flush_type] != 0);
+ ++buf_pool.n_flush[flush_type];
+ ut_ad(buf_pool.n_flush[flush_type] != 0);
mutex_exit(block_mutex);
-
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if (flush_type == BUF_FLUSH_LIST
&& is_uncompressed
@@ -1169,7 +1168,7 @@ buf_flush_batch() and buf_flush_page().
@return whether the page was flushed and the mutex released */
bool buf_flush_page_try(buf_block_t* block)
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(buf_page_mutex_own(block));
@@ -1198,13 +1197,13 @@ buf_flush_check_neighbor(
ut_ad(flush_type == BUF_FLUSH_LRU
|| flush_type == BUF_FLUSH_LIST);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
bpage = buf_page_hash_get(page_id);
if (!bpage) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(false);
}
@@ -1223,7 +1222,7 @@ buf_flush_check_neighbor(
}
mutex_exit(block_mutex);
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(ret);
}
@@ -1249,7 +1248,7 @@ buf_flush_try_neighbors(
ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
- if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN
+ if (UT_LIST_GET_LEN(buf_pool.LRU) < BUF_LRU_OLD_MIN_LEN
|| srv_flush_neighbors == 0) {
/* If there is little space or neighbor flushing is
not enabled then just flush the victim. */
@@ -1263,8 +1262,8 @@ buf_flush_try_neighbors(
ulint buf_flush_area;
buf_flush_area = ut_min(
- buf_pool->read_ahead_area,
- buf_pool->curr_size / 16);
+ buf_pool.read_ahead_area,
+ buf_pool.curr_size / 16);
low = (page_id.page_no() / buf_flush_area) * buf_flush_area;
high = (page_id.page_no() / buf_flush_area + 1) * buf_flush_area;
@@ -1334,12 +1333,12 @@ buf_flush_try_neighbors(
const page_id_t cur_page_id(page_id.space(), i);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
bpage = buf_page_hash_get(cur_page_id);
if (bpage == NULL) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
continue;
}
@@ -1367,7 +1366,7 @@ buf_flush_try_neighbors(
++count;
} else {
mutex_exit(block_mutex);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
continue;
@@ -1375,7 +1374,7 @@ buf_flush_try_neighbors(
mutex_exit(block_mutex);
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
if (count > 1) {
@@ -1408,7 +1407,7 @@ buf_flush_page_and_try_neighbors(
ulint n_to_flush,
ulint* count)
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
bool flushed;
BPageMutex* block_mutex = buf_page_get_mutex(bpage);
@@ -1421,20 +1420,20 @@ buf_flush_page_and_try_neighbors(
const page_id_t page_id = bpage->id;
mutex_exit(block_mutex);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* Try to flush also all the neighbors */
*count += buf_flush_try_neighbors(
page_id, flush_type, *count, n_to_flush);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
flushed = true;
} else {
mutex_exit(block_mutex);
flushed = false;
}
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
return(flushed);
}
@@ -1453,35 +1452,35 @@ static ulint buf_free_from_unzip_LRU_list_batch(ulint max)
{
ulint scanned = 0;
ulint count = 0;
- ulint free_len = UT_LIST_GET_LEN(buf_pool->free);
- ulint lru_len = UT_LIST_GET_LEN(buf_pool->unzip_LRU);
+ ulint free_len = UT_LIST_GET_LEN(buf_pool.free);
+ ulint lru_len = UT_LIST_GET_LEN(buf_pool.unzip_LRU);
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
- buf_block_t* block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
+ buf_block_t* block = UT_LIST_GET_LAST(buf_pool.unzip_LRU);
while (block != NULL
&& count < max
&& free_len < srv_LRU_scan_depth
- && lru_len > UT_LIST_GET_LEN(buf_pool->LRU) / 10) {
+ && lru_len > UT_LIST_GET_LEN(buf_pool.LRU) / 10) {
++scanned;
if (buf_LRU_free_page(&block->page, false)) {
- /* Block was freed. buf_pool->mutex potentially
+ /* Block was freed. buf_pool.mutex potentially
released and reacquired */
++count;
- block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
+ block = UT_LIST_GET_LAST(buf_pool.unzip_LRU);
} else {
block = UT_LIST_GET_PREV(unzip_LRU, block);
}
- free_len = UT_LIST_GET_LEN(buf_pool->free);
- lru_len = UT_LIST_GET_LEN(buf_pool->unzip_LRU);
+ free_len = UT_LIST_GET_LEN(buf_pool.free);
+ lru_len = UT_LIST_GET_LEN(buf_pool.unzip_LRU);
}
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
if (scanned) {
MONITOR_INC_VALUE_CUMULATIVE(
@@ -1504,29 +1503,29 @@ static void buf_flush_LRU_list_batch(ulint max, flush_counters_t* n)
{
buf_page_t* bpage;
ulint scanned = 0;
- ulint free_len = UT_LIST_GET_LEN(buf_pool->free);
- ulint lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
+ ulint free_len = UT_LIST_GET_LEN(buf_pool.free);
+ ulint lru_len = UT_LIST_GET_LEN(buf_pool.LRU);
ulint withdraw_depth = 0;
n->flushed = 0;
n->evicted = 0;
n->unzip_LRU_evicted = 0;
- ut_ad(mutex_own(&buf_pool->mutex));
- if (buf_pool->curr_size < buf_pool->old_size
- && buf_pool->withdraw_target > 0) {
- withdraw_depth = buf_pool->withdraw_target
- - UT_LIST_GET_LEN(buf_pool->withdraw);
+ ut_ad(mutex_own(&buf_pool.mutex));
+ if (buf_pool.curr_size < buf_pool.old_size
+ && buf_pool.withdraw_target > 0) {
+ withdraw_depth = buf_pool.withdraw_target
+ - UT_LIST_GET_LEN(buf_pool.withdraw);
}
- for (bpage = UT_LIST_GET_LAST(buf_pool->LRU);
+ for (bpage = UT_LIST_GET_LAST(buf_pool.LRU);
bpage != NULL && n->flushed + n->evicted < max
&& free_len < srv_LRU_scan_depth + withdraw_depth
&& lru_len > BUF_LRU_MIN_LEN;
++scanned,
- bpage = buf_pool->lru_hp.get()) {
+ bpage = buf_pool.lru_hp.get()) {
buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage);
- buf_pool->lru_hp.set(prev);
+ buf_pool.lru_hp.set(prev);
BPageMutex* block_mutex = buf_page_get_mutex(bpage);
@@ -1549,25 +1548,25 @@ static void buf_flush_LRU_list_batch(ulint max, flush_counters_t* n)
} else {
/* Can't evict or dispatch this block. Go to
previous. */
- ut_ad(buf_pool->lru_hp.is_hp(prev));
+ ut_ad(buf_pool.lru_hp.is_hp(prev));
mutex_exit(block_mutex);
}
ut_ad(!mutex_own(block_mutex));
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
- free_len = UT_LIST_GET_LEN(buf_pool->free);
- lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
+ free_len = UT_LIST_GET_LEN(buf_pool.free);
+ lru_len = UT_LIST_GET_LEN(buf_pool.LRU);
}
- buf_pool->lru_hp.set(NULL);
+ buf_pool.lru_hp.set(NULL);
/* We keep track of all flushes happening as part of LRU
flush. When estimating the desired rate at which flush_list
should be flushed, we factor in this value. */
buf_lru_flush_page_count += n->flushed;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
if (n->evicted) {
MONITOR_INC_VALUE_CUMULATIVE(
@@ -1622,22 +1621,22 @@ static ulint buf_do_flush_list_batch(ulint min_n, lsn_t lsn_limit)
ulint count = 0;
ulint scanned = 0;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
/* Start from the end of the list looking for a suitable
block to be flushed. */
- mutex_enter(&buf_pool->flush_list_mutex);
- ulint len = UT_LIST_GET_LEN(buf_pool->flush_list);
+ mutex_enter(&buf_pool.flush_list_mutex);
+ ulint len = UT_LIST_GET_LEN(buf_pool.flush_list);
/* In order not to degenerate this scan to O(n*n) we attempt
	to preserve the pointer to the previous block in the flush list. To do
so we declare it a hazard pointer. Any thread working on the
flush list must check the hazard pointer and if it is removing
the same block then it must reset it. */
- for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
+ for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool.flush_list);
count < min_n && bpage != NULL && len > 0
&& bpage->oldest_modification < lsn_limit;
- bpage = buf_pool->flush_hp.get(),
+ bpage = buf_pool.flush_hp.get(),
++scanned) {
buf_page_t* prev;
@@ -1646,8 +1645,8 @@ static ulint buf_do_flush_list_batch(ulint min_n, lsn_t lsn_limit)
ut_ad(bpage->in_flush_list);
prev = UT_LIST_GET_PREV(list, bpage);
- buf_pool->flush_hp.set(prev);
- mutex_exit(&buf_pool->flush_list_mutex);
+ buf_pool.flush_hp.set(prev);
+ mutex_exit(&buf_pool.flush_list_mutex);
#ifdef UNIV_DEBUG
bool flushed =
@@ -1655,15 +1654,15 @@ static ulint buf_do_flush_list_batch(ulint min_n, lsn_t lsn_limit)
buf_flush_page_and_try_neighbors(
bpage, BUF_FLUSH_LIST, min_n, &count);
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
- ut_ad(flushed || buf_pool->flush_hp.is_hp(prev));
+ ut_ad(flushed || buf_pool.flush_hp.is_hp(prev));
--len;
}
- buf_pool->flush_hp.set(NULL);
- mutex_exit(&buf_pool->flush_list_mutex);
+ buf_pool.flush_hp.set(NULL);
+ mutex_exit(&buf_pool.flush_list_mutex);
if (scanned) {
MONITOR_INC_VALUE_CUMULATIVE(
@@ -1681,7 +1680,7 @@ static ulint buf_do_flush_list_batch(ulint min_n, lsn_t lsn_limit)
count);
}
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
return(count);
}
@@ -1712,7 +1711,7 @@ buf_flush_batch(
ut_ad(flush_type == BUF_FLUSH_LRU
|| !sync_check_iterate(dict_sync_check()));
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
/* Note: The buffer pool mutex is released and reacquired within
the flush functions. */
@@ -1728,7 +1727,7 @@ buf_flush_batch(
ut_error;
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
DBUG_LOG("ib_buf", "flush " << flush_type << " completed");
}
@@ -1760,23 +1759,23 @@ bool buf_flush_start(buf_flush_t flush_type)
{
ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- if (buf_pool->n_flush[flush_type] > 0
- || buf_pool->init_flush[flush_type] == TRUE) {
+ if (buf_pool.n_flush[flush_type] > 0
+ || buf_pool.init_flush[flush_type] == TRUE) {
/* There is already a flush batch of the same type running */
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(false);
}
- buf_pool->init_flush[flush_type] = TRUE;
+ buf_pool.init_flush[flush_type] = TRUE;
- os_event_reset(buf_pool->no_flush[flush_type]);
+ os_event_reset(buf_pool.no_flush[flush_type]);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(true);
}
@@ -1785,20 +1784,22 @@ bool buf_flush_start(buf_flush_t flush_type)
@param[in] flush_type BUF_FLUSH_LRU or BUF_FLUSH_LIST */
void buf_flush_end(buf_flush_t flush_type)
{
- mutex_enter(&buf_pool->mutex);
+ ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
+
+ mutex_enter(&buf_pool.mutex);
- buf_pool->init_flush[flush_type] = FALSE;
+ buf_pool.init_flush[flush_type] = FALSE;
- buf_pool->try_LRU_scan = TRUE;
+ buf_pool.try_LRU_scan = TRUE;
- if (buf_pool->n_flush[flush_type] == 0) {
+ if (buf_pool.n_flush[flush_type] == 0) {
/* The running flush batch has ended */
- os_event_set(buf_pool->no_flush[flush_type]);
+ os_event_set(buf_pool.no_flush[flush_type]);
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if (!srv_read_only_mode) {
buf_dblwr_flush_buffered_writes();
@@ -1813,7 +1814,7 @@ void buf_flush_wait_batch_end(buf_flush_t type)
{
ut_ad(type == BUF_FLUSH_LRU || type == BUF_FLUSH_LIST);
thd_wait_begin(NULL, THD_WAIT_DISKIO);
- os_event_wait(buf_pool->no_flush[type]);
+ os_event_wait(buf_pool.no_flush[type]);
thd_wait_end(NULL);
}
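
The hunks around buf_flush_start(), buf_flush_end() and buf_flush_wait_batch_end() show the per-flush-type handshake: a batch of a given type may only start when no batch of that type is being initiated or still has writes in flight, and waiters block on the no_flush event until the batch has drained. The sketch below restates that pattern with standard C++ primitives instead of buf_pool.mutex and os_event; the class and member names, and the page_dispatched()/page_completed() hooks, are invented for illustration only.

#include <condition_variable>
#include <mutex>

class FlushTypeCoordinator {
public:
	/* Corresponds to buf_flush_start(): refuse to start if a batch of
	this type is already being initiated or still has writes in flight. */
	bool start() {
		std::lock_guard<std::mutex> g(m);
		if (init_flush || n_flush > 0) {
			return false;
		}
		init_flush = true;	/* like init_flush[type] = TRUE */
		batch_done = false;	/* like os_event_reset(no_flush[type]) */
		return true;
	}

	/* A page write was dispatched as part of the running batch. */
	void page_dispatched() {
		std::lock_guard<std::mutex> g(m);
		++n_flush;
	}

	/* I/O completion of one page of the batch. */
	void page_completed() {
		std::lock_guard<std::mutex> g(m);
		if (--n_flush == 0 && !init_flush) {
			batch_done = true;	/* like os_event_set(no_flush[type]) */
			cv.notify_all();
		}
	}

	/* Corresponds to buf_flush_end(). */
	void end() {
		std::lock_guard<std::mutex> g(m);
		init_flush = false;
		if (n_flush == 0) {
			batch_done = true;
			cv.notify_all();
		}
	}

	/* Corresponds to buf_flush_wait_batch_end(). */
	void wait_batch_end() {
		std::unique_lock<std::mutex> lk(m);
		cv.wait(lk, [this] { return batch_done; });
	}

private:
	std::mutex		m;
	std::condition_variable	cv;
	bool			init_flush = false;	/* batch being initiated */
	unsigned		n_flush = 0;		/* writes still in flight */
	bool			batch_done = true;	/* "no_flush" event is set */
};
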
@@ -1860,7 +1861,7 @@ void buf_flush_wait_flushed(lsn_t new_oldest)
	blocks, because anyway we need fsync to make checkpoint.
So, we don't need to wait for the batch end here. */
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
buf_page_t* bpage;
@@ -1868,7 +1869,7 @@ void buf_flush_wait_flushed(lsn_t new_oldest)
list. We would only need to write out temporary pages if the
page is about to be evicted from the buffer pool, and the page
contents is still needed (the page has not been freed). */
- for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
+ for (bpage = UT_LIST_GET_LAST(buf_pool.flush_list);
bpage && fsp_is_system_temporary(bpage->id.space());
bpage = UT_LIST_GET_PREV(list, bpage)) {
ut_ad(bpage->in_flush_list);
@@ -1876,7 +1877,7 @@ void buf_flush_wait_flushed(lsn_t new_oldest)
lsn_t oldest = bpage ? bpage->oldest_modification : 0;
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
if (oldest == 0 || oldest >= new_oldest) {
break;
@@ -1938,17 +1939,17 @@ bool buf_flush_single_page_from_LRU()
buf_page_t* bpage;
ibool freed;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- for (bpage = buf_pool->single_scan_itr.start(), scanned = 0,
+ for (bpage = buf_pool.single_scan_itr.start(), scanned = 0,
freed = false;
bpage != NULL;
- ++scanned, bpage = buf_pool->single_scan_itr.get()) {
+ ++scanned, bpage = buf_pool.single_scan_itr.get()) {
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage);
- buf_pool->single_scan_itr.set(prev);
+ buf_pool.single_scan_itr.set(prev);
BPageMutex* block_mutex;
block_mutex = buf_page_get_mutex(bpage);
@@ -1961,7 +1962,7 @@ bool buf_flush_single_page_from_LRU()
mutex_exit(block_mutex);
if (buf_LRU_free_page(bpage, true)) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
freed = true;
break;
}
@@ -1993,7 +1994,7 @@ bool buf_flush_single_page_from_LRU()
if (!freed) {
/* Can't find a single flushable page. */
ut_ad(!bpage);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
if (scanned) {
@@ -2004,7 +2005,7 @@ bool buf_flush_single_page_from_LRU()
scanned);
}
- ut_ad(!mutex_own(&buf_pool->mutex));
+ ut_ad(!mutex_own(&buf_pool.mutex));
return(freed);
}
@@ -2024,16 +2025,16 @@ static ulint buf_flush_LRU_list()
/* srv_LRU_scan_depth can be arbitrarily large value.
We cap it with current LRU size. */
- mutex_enter(&buf_pool->mutex);
- scan_depth = UT_LIST_GET_LEN(buf_pool->LRU);
- if (buf_pool->curr_size < buf_pool->old_size
- && buf_pool->withdraw_target > 0) {
- withdraw_depth = buf_pool->withdraw_target
- - UT_LIST_GET_LEN(buf_pool->withdraw);
+ mutex_enter(&buf_pool.mutex);
+ scan_depth = UT_LIST_GET_LEN(buf_pool.LRU);
+ if (buf_pool.curr_size < buf_pool.old_size
+ && buf_pool.withdraw_target > 0) {
+ withdraw_depth = buf_pool.withdraw_target
+ - UT_LIST_GET_LEN(buf_pool.withdraw);
} else {
withdraw_depth = 0;
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if (withdraw_depth > srv_LRU_scan_depth) {
scan_depth = ut_min(withdraw_depth, scan_depth);
} else {
@@ -2052,10 +2053,10 @@ static ulint buf_flush_LRU_list()
/** Wait for any possible LRU flushes to complete. */
void buf_flush_wait_LRU_batch_end()
{
- mutex_enter(&buf_pool->mutex);
- bool wait = buf_pool->n_flush[BUF_FLUSH_LRU]
- || buf_pool->init_flush[BUF_FLUSH_LRU];
- mutex_exit(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
+ bool wait = buf_pool.n_flush[BUF_FLUSH_LRU]
+ || buf_pool.init_flush[BUF_FLUSH_LRU];
+ mutex_exit(&buf_pool.mutex);
if (wait) {
buf_flush_wait_batch_end(BUF_FLUSH_LRU);
}
@@ -2069,7 +2070,7 @@ static
ulint
af_get_pct_for_dirty()
{
- const ulint dirty = UT_LIST_GET_LEN(buf_pool->flush_list);
+ const ulint dirty = UT_LIST_GET_LEN(buf_pool.flush_list);
if (!dirty) {
/* No pages modified */
return 0;
@@ -2079,8 +2080,8 @@ af_get_pct_for_dirty()
pool (including the flush_list) was emptied while we are
looking at it) */
double dirty_pct = double(100 * dirty)
- / (1 + UT_LIST_GET_LEN(buf_pool->LRU)
- + UT_LIST_GET_LEN(buf_pool->free));
+ / (1 + UT_LIST_GET_LEN(buf_pool.LRU)
+ + UT_LIST_GET_LEN(buf_pool.free));
ut_a(srv_max_dirty_pages_pct_lwm
<= srv_max_buf_pool_modified_pct);
@@ -2299,8 +2300,8 @@ page_cleaner_flush_pages_recommendation(ulint last_pages_in)
+ lsn_avg_rate * buf_flush_lsn_scan_factor;
ulint pages_for_lsn = 0;
- mutex_enter(&buf_pool->flush_list_mutex);
- for (buf_page_t* b = UT_LIST_GET_LAST(buf_pool->flush_list);
+ mutex_enter(&buf_pool.flush_list_mutex);
+ for (buf_page_t* b = UT_LIST_GET_LAST(buf_pool.flush_list);
b != NULL;
b = UT_LIST_GET_PREV(list, b)) {
if (b->oldest_modification > target_lsn) {
@@ -2308,7 +2309,7 @@ page_cleaner_flush_pages_recommendation(ulint last_pages_in)
}
++pages_for_lsn;
}
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
mutex_enter(&page_cleaner.mutex);
ut_ad(page_cleaner.slot.state == PAGE_CLEANER_STATE_NONE);
@@ -2676,7 +2677,7 @@ static os_thread_ret_t DECLARE_THREAD(buf_flush_page_cleaner)(void*)
/* The page_cleaner skips sleep if the server is
idle and there are no pending IOs in the buffer pool
and there is work to do. */
- if (!n_flushed || !buf_pool->n_pend_reads
+ if (!n_flushed || !buf_pool.n_pend_reads
|| srv_check_activity(last_activity)) {
ret_sleep = pc_sleep_if_needed(
@@ -2930,7 +2931,7 @@ static os_thread_ret_t DECLARE_THREAD(buf_flush_page_cleaner)(void*)
/* Some sanity checks */
ut_a(srv_get_active_thread_type() == SRV_NONE);
ut_a(srv_shutdown_state == SRV_SHUTDOWN_FLUSH_PHASE);
- ut_a(UT_LIST_GET_LEN(buf_pool->flush_list) == 0);
+ ut_a(UT_LIST_GET_LEN(buf_pool.flush_list) == 0);
/* We have lived our life. Time to die. */
@@ -3012,34 +3013,34 @@ static void buf_flush_validate_low()
const ib_rbt_node_t* rnode = NULL;
Check check;
- ut_ad(mutex_own(&buf_pool->flush_list_mutex));
+ ut_ad(mutex_own(&buf_pool.flush_list_mutex));
- ut_list_validate(buf_pool->flush_list, check);
+ ut_list_validate(buf_pool.flush_list, check);
- bpage = UT_LIST_GET_FIRST(buf_pool->flush_list);
+ bpage = UT_LIST_GET_FIRST(buf_pool.flush_list);
/* If we are in recovery mode i.e.: flush_rbt != NULL
then each block in the flush_list must also be present
in the flush_rbt. */
- if (buf_pool->flush_rbt != NULL) {
- rnode = rbt_first(buf_pool->flush_rbt);
+ if (buf_pool.flush_rbt != NULL) {
+ rnode = rbt_first(buf_pool.flush_rbt);
}
while (bpage != NULL) {
const lsn_t om = bpage->oldest_modification;
ut_ad(bpage->in_flush_list);
- /* A page in buf_pool->flush_list can be in
+ /* A page in buf_pool.flush_list can be in
BUF_BLOCK_REMOVE_HASH state. This happens when a page
is in the middle of being relocated. In that case the
original descriptor can have this state and still be
in the flush list waiting to acquire the
- buf_pool->flush_list_mutex to complete the relocation. */
+ buf_pool.flush_list_mutex to complete the relocation. */
ut_a(buf_page_in_file(bpage)
|| buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH);
ut_a(om > 0);
- if (buf_pool->flush_rbt != NULL) {
+ if (buf_pool.flush_rbt != NULL) {
buf_page_t** prpage;
ut_a(rnode != NULL);
@@ -3047,7 +3048,7 @@ static void buf_flush_validate_low()
ut_a(*prpage != NULL);
ut_a(*prpage == bpage);
- rnode = rbt_next(buf_pool->flush_rbt, rnode);
+ rnode = rbt_next(buf_pool.flush_rbt, rnode);
}
bpage = UT_LIST_GET_NEXT(list, bpage);
@@ -3063,9 +3064,9 @@ static void buf_flush_validate_low()
/** Validate the flush list. */
void buf_flush_validate()
{
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
buf_flush_validate_low();
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
}
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
@@ -3078,12 +3079,12 @@ ulint buf_pool_get_dirty_pages_count(ulint id, FlushObserver* observer)
{
ulint count = 0;
- mutex_enter(&buf_pool->mutex);
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
buf_page_t* bpage;
- for (bpage = UT_LIST_GET_FIRST(buf_pool->flush_list);
+ for (bpage = UT_LIST_GET_FIRST(buf_pool.flush_list);
bpage != 0;
bpage = UT_LIST_GET_NEXT(list, bpage)) {
@@ -3099,8 +3100,8 @@ ulint buf_pool_get_dirty_pages_count(ulint id, FlushObserver* observer)
}
}
- mutex_exit(&buf_pool->flush_list_mutex);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
+ mutex_exit(&buf_pool.mutex);
return(count);
}
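
buf_do_flush_list_batch() above relies on a hazard pointer (buf_pool.flush_hp) so that the flush_list scan can release flush_list_mutex for each page write and still resume in O(1) instead of restarting from the tail. A minimal sketch of such a helper follows; the Node type and the names are placeholders rather than the InnoDB declarations, and the protecting mutex is assumed to be held by every caller.

/* Sketch of a hazard pointer for resuming a backwards list scan.  The
scanner stores the predecessor it wants to resume from before releasing the
list mutex; any thread that removes a node while the mutex is released must
call adjust() so the scanner never resumes from a node that left the list. */
struct Node {
	Node*	prev;	/* predecessor in the doubly linked list */
};

class HazardPointer {
public:
	void set(Node* node) { hp = node; }		/* remember resume point */
	Node* get() const { return hp; }		/* resume the scan here */
	bool is_hp(const Node* node) const { return hp == node; }

	/* Called by whoever removes 'victim' from the list: if the scanner
	planned to resume from it, move the pointer to its predecessor. */
	void adjust(const Node* victim) {
		if (hp == victim) {
			hp = victim->prev;
		}
	}

private:
	Node*	hp = nullptr;
};

In the loop above the pattern is: set(prev), release flush_list_mutex, flush the page, reacquire the mutex, continue from get(); buf_LRU_adjust_hp() in buf0lru.cc below is the corresponding adjust() call for the LRU-side iterators.
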
diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc
index 7fab23ca6d8..a1973fc81ed 100644
--- a/storage/innobase/buf/buf0lru.cc
+++ b/storage/innobase/buf/buf0lru.cc
@@ -47,7 +47,7 @@ Created 11/5/1995 Heikki Tuuri
#include "lock0lock.h"
/** The number of blocks from the LRU_old pointer onward, including
-the block pointed to, must be buf_pool->LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV
+the block pointed to, must be buf_pool.LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV
of the whole LRU list length, except that the tolerance defined below
is allowed. Note that the tolerance must be small enough such that for
even the BUF_LRU_OLD_MIN_LEN long LRU list, the LRU_old pointer is not
@@ -63,7 +63,7 @@ static const ulint BUF_LRU_OLD_TOLERANCE = 20;
/** When dropping the search hash index entries before deleting an ibd
file, we build a local array of pages belonging to that tablespace
in the buffer pool. Following is the size of that array.
-We also release buf_pool->mutex after scanning this many pages of the
+We also release buf_pool.mutex after scanning this many pages of the
flush_list when dropping a table. This is to ensure that other threads
are not blocked for an extended period of time when using very large
buffer pools. */
@@ -129,7 +129,7 @@ Takes a block out of the LRU list and page hash table.
If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
the object will be freed.
-The caller must hold buf_pool->mutex, the buf_page_get_mutex() mutex
+The caller must hold buf_pool.mutex, the buf_page_get_mutex() mutex
and the appropriate hash_lock. This function will release the
buf_page_get_mutex() and the hash_lock.
@@ -161,35 +161,35 @@ buf_LRU_block_free_hashed_page(
static inline void incr_LRU_size_in_bytes(const buf_page_t* bpage)
{
/* FIXME: use atomics, not mutex */
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
- buf_pool->stat.LRU_bytes += bpage->size.physical();
+ buf_pool.stat.LRU_bytes += bpage->size.physical();
- ut_ad(buf_pool->stat.LRU_bytes <= buf_pool->curr_pool_size);
+ ut_ad(buf_pool.stat.LRU_bytes <= buf_pool.curr_pool_size);
}
/** @return whether the unzip_LRU list should be used for evicting a victim
instead of the general LRU list */
bool buf_LRU_evict_from_unzip_LRU()
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
/* If the unzip_LRU list is empty, we can only use the LRU. */
- if (UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0) {
+ if (UT_LIST_GET_LEN(buf_pool.unzip_LRU) == 0) {
return false;
}
/* If unzip_LRU is at most 10% of the size of the LRU list,
then use the LRU. This slack allows us to keep hot
decompressed pages in the buffer pool. */
- if (UT_LIST_GET_LEN(buf_pool->unzip_LRU)
- <= UT_LIST_GET_LEN(buf_pool->LRU) / 10) {
+ if (UT_LIST_GET_LEN(buf_pool.unzip_LRU)
+ <= UT_LIST_GET_LEN(buf_pool.LRU) / 10) {
return false;
}
/* If eviction hasn't started yet, we assume by default
that a workload is disk bound. */
- if (buf_pool->freed_page_clock == 0) {
+ if (buf_pool.freed_page_clock == 0) {
return true;
}
@@ -245,10 +245,10 @@ buf_LRU_drop_page_hash_for_tablespace(ulint id)
ulint num_entries = 0;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
scan_again:
- for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool->LRU);
+ for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool.LRU);
bpage != NULL;
/* No op */) {
@@ -302,17 +302,17 @@ next_page:
goto next_page;
}
- /* Array full. We release the buf_pool->mutex to obey
+ /* Array full. We release the buf_pool.mutex to obey
the latching order. */
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
num_entries = 0;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- /* Note that we released the buf_pool mutex above
+ /* Note that we released the buf_pool.mutex above
after reading the prev_bpage during processing of a
page_hash_batch (i.e.: when the array was full).
Because prev_bpage could belong to a compressed-only
@@ -335,7 +335,7 @@ next_page:
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* Drop any remaining batch of search hashed pages. */
buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
@@ -380,16 +380,14 @@ static void buf_flush_yield(buf_page_t* bpage)
block mutexes. */
buf_page_set_sticky(bpage);
- /* Now it is safe to release the buf_pool->mutex. */
- mutex_exit(&buf_pool->mutex);
-
+ /* Now it is safe to release the buf_pool.mutex. */
+ mutex_exit(&buf_pool.mutex);
mutex_exit(block_mutex);
/* Try and force a context switch. */
os_thread_yield();
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
mutex_enter(block_mutex);
-
/* "Unfix" the block now that we have both the
buffer pool and block mutex again. */
buf_page_unset_sticky(bpage);
@@ -409,7 +407,7 @@ buf_flush_try_yield(
ulint processed) /*!< in: number of pages processed */
{
/* Every BUF_LRU_DROP_SEARCH_SIZE iterations in the
- loop we release buf_pool->mutex to let other threads
+ loop we release buf_pool.mutex to let other threads
do their job but only if the block is not IO fixed. This
ensures that the block stays in its position in the
flush_list. */
@@ -418,14 +416,14 @@ buf_flush_try_yield(
&& processed >= BUF_LRU_DROP_SEARCH_SIZE
&& buf_page_get_io_fix(bpage) == BUF_IO_NONE) {
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
/* Release the buffer pool and block mutex
to give the other threads a go. */
buf_flush_yield(bpage);
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
/* Should not have been removed from the flush
list during the yield. However, this check is
@@ -449,12 +447,12 @@ static MY_ATTRIBUTE((warn_unused_result))
bool
buf_flush_or_remove_page(buf_page_t* bpage, bool flush)
{
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(mutex_own(&buf_pool->flush_list_mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(mutex_own(&buf_pool.flush_list_mutex));
/* bpage->space and bpage->io_fix are protected by
- buf_pool->mutex and block_mutex. It is safe to check
- them while holding buf_pool->mutex only. */
+ buf_pool.mutex and block_mutex. It is safe to check
+ them while holding buf_pool.mutex only. */
if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
@@ -474,9 +472,9 @@ buf_flush_or_remove_page(buf_page_t* bpage, bool flush)
latching order. We are however guaranteed that the page
will stay in the flush_list and won't be relocated because
buf_flush_remove() and buf_flush_relocate_on_flush_list()
- need buf_pool->mutex as well. */
+ need buf_pool.mutex as well. */
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
mutex_enter(block_mutex);
@@ -501,7 +499,7 @@ buf_flush_or_remove_page(buf_page_t* bpage, bool flush)
/* Wake possible simulated aio thread to actually
post the writes to the operating system */
os_aio_simulated_wake_handler_threads();
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
} else {
mutex_exit(block_mutex);
}
@@ -509,10 +507,10 @@ buf_flush_or_remove_page(buf_page_t* bpage, bool flush)
mutex_exit(block_mutex);
}
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
ut_ad(!mutex_own(block_mutex));
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
return(processed);
}
@@ -533,11 +531,11 @@ buf_flush_or_remove_pages(ulint id, FlushObserver* observer)
buf_page_t* bpage;
ulint processed = 0;
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
rescan:
bool all_freed = true;
- for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
+ for (bpage = UT_LIST_GET_LAST(buf_pool.flush_list);
bpage != NULL;
bpage = prev) {
@@ -615,7 +613,7 @@ rescan:
}
}
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
return(all_freed);
}
@@ -632,9 +630,9 @@ void
buf_flush_dirty_pages(ulint id, FlushObserver* observer)
{
for (;;) {
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
bool freed = buf_flush_or_remove_pages(id, observer);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
ut_d(buf_flush_validate());
@@ -669,14 +667,11 @@ void buf_LRU_flush_or_remove_pages(ulint id, FlushObserver* observer)
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
-/********************************************************************//**
-Insert a compressed block into buf_pool->zip_clean in the LRU order. */
-void
-buf_LRU_insert_zip_clean(
-/*=====================*/
- buf_page_t* bpage) /*!< in: pointer to the block in question */
+/** Insert a compressed block into buf_pool.zip_clean in the LRU order.
+@param[in] bpage pointer to the block in question */
+void buf_LRU_insert_zip_clean(buf_page_t* bpage)
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);
/* Find the first successor of bpage in the LRU list
@@ -693,9 +688,9 @@ buf_LRU_insert_zip_clean(
}
if (b != NULL) {
- UT_LIST_INSERT_AFTER(buf_pool->zip_clean, b, bpage);
+ UT_LIST_INSERT_AFTER(buf_pool.zip_clean, b, bpage);
} else {
- UT_LIST_ADD_FIRST(buf_pool->zip_clean, bpage);
+ UT_LIST_ADD_FIRST(buf_pool.zip_clean, bpage);
}
}
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
@@ -707,7 +702,7 @@ LRU list. The compressed page is preserved, and it need not be clean.
@return true if freed */
static bool buf_LRU_free_from_unzip_LRU_list(bool scan_all)
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
if (!buf_LRU_evict_from_unzip_LRU()) {
return(false);
@@ -716,7 +711,7 @@ static bool buf_LRU_free_from_unzip_LRU_list(bool scan_all)
ulint scanned = 0;
bool freed = false;
- for (buf_block_t* block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
+ for (buf_block_t* block = UT_LIST_GET_LAST(buf_pool.unzip_LRU);
block != NULL
&& !freed
&& (scan_all || scanned < srv_LRU_scan_depth);
@@ -752,21 +747,21 @@ static bool buf_LRU_free_from_unzip_LRU_list(bool scan_all)
@return whether a page was freed */
static bool buf_LRU_free_from_common_LRU_list(bool scan_all)
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ulint scanned = 0;
bool freed = false;
- for (buf_page_t* bpage = buf_pool->lru_scan_itr.start();
+ for (buf_page_t* bpage = buf_pool.lru_scan_itr.start();
bpage != NULL
&& !freed
&& (scan_all || scanned < BUF_LRU_SEARCH_SCAN_THRESHOLD);
- ++scanned, bpage = buf_pool->lru_scan_itr.get()) {
+ ++scanned, bpage = buf_pool.lru_scan_itr.get()) {
buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage);
BPageMutex* mutex = buf_page_get_mutex(bpage);
- buf_pool->lru_scan_itr.set(prev);
+ buf_pool.lru_scan_itr.set(prev);
mutex_enter(mutex);
@@ -786,10 +781,10 @@ static bool buf_LRU_free_from_common_LRU_list(bool scan_all)
/* Keep track of pages that are evicted without
ever being accessed. This gives us a measure of
the effectiveness of readahead */
- ++buf_pool->stat.n_ra_pages_evicted;
+ ++buf_pool.stat.n_ra_pages_evicted;
}
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(!mutex_own(mutex));
}
@@ -810,7 +805,7 @@ static bool buf_LRU_free_from_common_LRU_list(bool scan_all)
@return true if found and freed */
bool buf_LRU_scan_and_free_block(bool scan_all)
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
return(buf_LRU_free_from_unzip_LRU_list(scan_all)
|| buf_LRU_free_from_common_LRU_list(scan_all));
@@ -820,21 +815,21 @@ bool buf_LRU_scan_and_free_block(bool scan_all)
bool buf_LRU_buf_pool_running_out()
{
return !recv_recovery_is_on()
- && UT_LIST_GET_LEN(buf_pool->free)
- + UT_LIST_GET_LEN(buf_pool->LRU)
- < ut_min(buf_pool->curr_size, buf_pool->old_size) / 4;
+ && UT_LIST_GET_LEN(buf_pool.free)
+ + UT_LIST_GET_LEN(buf_pool.LRU)
+ < ut_min(buf_pool.curr_size, buf_pool.old_size) / 4;
}
-/** @return a buffer block from the buf_pool->free list
+/** @return a buffer block from the buf_pool.free list
@retval NULL if the free list is empty */
buf_block_t* buf_LRU_get_free_only()
{
buf_block_t* block;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
block = reinterpret_cast<buf_block_t*>(
- UT_LIST_GET_FIRST(buf_pool->free));
+ UT_LIST_GET_FIRST(buf_pool.free));
while (block != NULL) {
@@ -843,11 +838,11 @@ buf_block_t* buf_LRU_get_free_only()
ut_ad(!block->page.in_flush_list);
ut_ad(!block->page.in_LRU_list);
ut_a(!buf_page_in_file(&block->page));
- UT_LIST_REMOVE(buf_pool->free, &block->page);
+ UT_LIST_REMOVE(buf_pool.free, &block->page);
- if (buf_pool->curr_size >= buf_pool->old_size
- || UT_LIST_GET_LEN(buf_pool->withdraw)
- >= buf_pool->withdraw_target
+ if (buf_pool.curr_size >= buf_pool.old_size
+ || UT_LIST_GET_LEN(buf_pool.withdraw)
+ >= buf_pool.withdraw_target
|| !buf_block_will_be_withdrawn(block)) {
/* found valid free block */
buf_page_mutex_enter(block);
@@ -864,12 +859,12 @@ buf_block_t* buf_LRU_get_free_only()
/* This should be withdrawn */
UT_LIST_ADD_LAST(
- buf_pool->withdraw,
+ buf_pool.withdraw,
&block->page);
ut_d(block->in_withdraw_list = TRUE);
block = reinterpret_cast<buf_block_t*>(
- UT_LIST_GET_FIRST(buf_pool->free));
+ UT_LIST_GET_FIRST(buf_pool.free));
}
return(block);
@@ -882,12 +877,12 @@ function will either assert or issue a warning and switch on the
status monitor. */
static void buf_LRU_check_size_of_non_data_objects()
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
if (!recv_recovery_is_on()
- && buf_pool->curr_size == buf_pool->old_size
- && UT_LIST_GET_LEN(buf_pool->free)
- + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 20) {
+ && buf_pool.curr_size == buf_pool.old_size
+ && UT_LIST_GET_LEN(buf_pool.free)
+ + UT_LIST_GET_LEN(buf_pool.LRU) < buf_pool.curr_size / 20) {
ib::fatal() << "Over 95 percent of the buffer pool is"
" occupied by lock heaps"
@@ -897,13 +892,13 @@ static void buf_LRU_check_size_of_non_data_objects()
" Check that your transactions do not set too many"
" row locks, or review if"
" innodb_buffer_pool_size="
- << (buf_pool->curr_size >> (20U - srv_page_size_shift))
+ << (buf_pool.curr_size >> (20U - srv_page_size_shift))
<< "M could be bigger.";
} else if (!recv_recovery_is_on()
- && buf_pool->curr_size == buf_pool->old_size
- && (UT_LIST_GET_LEN(buf_pool->free)
- + UT_LIST_GET_LEN(buf_pool->LRU))
- < buf_pool->curr_size / 3) {
+ && buf_pool.curr_size == buf_pool.old_size
+ && (UT_LIST_GET_LEN(buf_pool.free)
+ + UT_LIST_GET_LEN(buf_pool.LRU))
+ < buf_pool.curr_size / 3) {
if (!buf_lru_switched_on_innodb_mon) {
@@ -919,7 +914,7 @@ static void buf_LRU_check_size_of_non_data_objects()
" Check that your transactions do not"
" set too many row locks."
" innodb_buffer_pool_size="
- << (buf_pool->curr_size >>
+ << (buf_pool.curr_size >>
(20U - srv_page_size_shift)) << "M."
" Starting the InnoDB Monitor to print"
" diagnostics.";
@@ -950,7 +945,7 @@ the free list. Even when we flush a page or find a page in LRU scan
we put it to free list to be used.
* iteration 0:
* get a block from free list, success:done
- * if buf_pool->try_LRU_scan is set
+ * if buf_pool.try_LRU_scan is set
* scan LRU up to srv_LRU_scan_depth to find a clean block
* the above will put the block on free list
* success:retry the free list
@@ -960,7 +955,7 @@ we put it to free list to be used.
* iteration 1:
* same as iteration 0 except:
* scan whole LRU list
- * scan LRU list even if buf_pool->try_LRU_scan is not set
+ * scan LRU list even if buf_pool.try_LRU_scan is not set
* iteration > 1:
* same as iteration 1 but sleep 10ms
@return the free control block, in state BUF_BLOCK_READY_FOR_USE */
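
The comment block above describes buf_LRU_get_free_block() as an escalating retry loop: try the free list, then an LRU scan whose scope widens after the first iteration, then a single-page flush, sleeping from the third iteration on. A compressed restatement of that control flow is sketched below; free_list_pop(), lru_scan_and_free() and flush_single_page() are placeholder stand-ins, not the real routines.

#include <chrono>
#include <thread>

/* Placeholder stand-ins so the sketch is self-contained. */
static void* free_list_pop()		{ return nullptr; }
static bool  lru_scan_and_free(bool)	{ return false; }
static bool  flush_single_page()	{ return false; }

/* Escalating search for a free block, as described in the comment above. */
void* get_free_block()
{
	for (unsigned n_iterations = 0;; ++n_iterations) {
		if (void* block = free_list_pop()) {
			return block;		/* common fast path */
		}

		/* Iteration 0 scans a bounded depth (and only if
		try_LRU_scan is set); later iterations scan the whole LRU. */
		bool freed = lru_scan_and_free(/* scan_all = */ n_iterations > 0);

		if (!freed && n_iterations > 1) {
			/* Back off before trying to make progress again. */
			std::this_thread::sleep_for(
				std::chrono::milliseconds(10));
		}

		if (!freed) {
			flush_single_page();	/* free a block via one write */
		}
	}
}
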
@@ -973,7 +968,7 @@ buf_block_t* buf_LRU_get_free_block()
MONITOR_INC(MONITOR_LRU_GET_FREE_SEARCH);
loop:
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
buf_LRU_check_size_of_non_data_objects();
@@ -986,7 +981,7 @@ loop:
block = buf_LRU_get_free_only();
if (block != NULL) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
memset(&block->page.zip, 0, sizeof block->page.zip);
block->skip_flush_check = false;
block->page.flush_observer = NULL;
@@ -995,7 +990,7 @@ loop:
MONITOR_INC( MONITOR_LRU_GET_FREE_LOOPS );
freed = false;
- if (buf_pool->try_LRU_scan || n_iterations > 0) {
+ if (buf_pool.try_LRU_scan || n_iterations > 0) {
/* If no block was in the free list, search from the
end of the LRU list and try to free a block there.
		If we are doing this for the first time we'll scan only
@@ -1008,7 +1003,7 @@ loop:
in scanning the LRU list. This flag is set to
TRUE again when we flush a batch from this
buffer pool. */
- buf_pool->try_LRU_scan = FALSE;
+ buf_pool.try_LRU_scan = FALSE;
/* Also tell the page_cleaner thread that
there is work for it to do. */
@@ -1020,7 +1015,7 @@ loop:
not_found:
#endif
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if (freed) {
goto loop;
@@ -1090,34 +1085,34 @@ static void buf_LRU_old_adjust_len()
ulint old_len;
ulint new_len;
- ut_a(buf_pool->LRU_old);
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(buf_pool->LRU_old_ratio >= BUF_LRU_OLD_RATIO_MIN);
- ut_ad(buf_pool->LRU_old_ratio <= BUF_LRU_OLD_RATIO_MAX);
+ ut_a(buf_pool.LRU_old);
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(buf_pool.LRU_old_ratio >= BUF_LRU_OLD_RATIO_MIN);
+ ut_ad(buf_pool.LRU_old_ratio <= BUF_LRU_OLD_RATIO_MAX);
compile_time_assert(BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN
> BUF_LRU_OLD_RATIO_DIV
* (BUF_LRU_OLD_TOLERANCE + 5));
compile_time_assert(BUF_LRU_NON_OLD_MIN_LEN < BUF_LRU_OLD_MIN_LEN);
#ifdef UNIV_LRU_DEBUG
- /* buf_pool->LRU_old must be the first item in the LRU list
+ /* buf_pool.LRU_old must be the first item in the LRU list
whose "old" flag is set. */
- ut_a(buf_pool->LRU_old->old);
- ut_a(!UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)
- || !UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)->old);
- ut_a(!UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)
- || UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)->old);
+ ut_a(buf_pool.LRU_old->old);
+ ut_a(!UT_LIST_GET_PREV(LRU, buf_pool.LRU_old)
+ || !UT_LIST_GET_PREV(LRU, buf_pool.LRU_old)->old);
+ ut_a(!UT_LIST_GET_NEXT(LRU, buf_pool.LRU_old)
+ || UT_LIST_GET_NEXT(LRU, buf_pool.LRU_old)->old);
#endif /* UNIV_LRU_DEBUG */
- old_len = buf_pool->LRU_old_len;
- new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
- * buf_pool->LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
- UT_LIST_GET_LEN(buf_pool->LRU)
+ old_len = buf_pool.LRU_old_len;
+ new_len = ut_min(UT_LIST_GET_LEN(buf_pool.LRU)
+ * buf_pool.LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
+ UT_LIST_GET_LEN(buf_pool.LRU)
- (BUF_LRU_OLD_TOLERANCE
+ BUF_LRU_NON_OLD_MIN_LEN));
for (;;) {
- buf_page_t* LRU_old = buf_pool->LRU_old;
+ buf_page_t* LRU_old = buf_pool.LRU_old;
ut_a(LRU_old);
ut_ad(LRU_old->in_LRU_list);
@@ -1129,18 +1124,18 @@ static void buf_LRU_old_adjust_len()
if (old_len + BUF_LRU_OLD_TOLERANCE < new_len) {
- buf_pool->LRU_old = LRU_old = UT_LIST_GET_PREV(
+ buf_pool.LRU_old = LRU_old = UT_LIST_GET_PREV(
LRU, LRU_old);
#ifdef UNIV_LRU_DEBUG
ut_a(!LRU_old->old);
#endif /* UNIV_LRU_DEBUG */
- old_len = ++buf_pool->LRU_old_len;
+ old_len = ++buf_pool.LRU_old_len;
buf_page_set_old(LRU_old, TRUE);
} else if (old_len > new_len + BUF_LRU_OLD_TOLERANCE) {
- buf_pool->LRU_old = UT_LIST_GET_NEXT(LRU, LRU_old);
- old_len = --buf_pool->LRU_old_len;
+ buf_pool.LRU_old = UT_LIST_GET_NEXT(LRU, LRU_old);
+ old_len = --buf_pool.LRU_old_len;
buf_page_set_old(LRU_old, FALSE);
} else {
return;
@@ -1152,14 +1147,14 @@ static void buf_LRU_old_adjust_len()
called when the LRU list grows to BUF_LRU_OLD_MIN_LEN length. */
static void buf_LRU_old_init()
{
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN);
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_a(UT_LIST_GET_LEN(buf_pool.LRU) == BUF_LRU_OLD_MIN_LEN);
/* We first initialize all blocks in the LRU list as old and then use
the adjust function to move the LRU_old pointer to the right
position */
- for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool->LRU);
+ for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool.LRU);
bpage != NULL;
bpage = UT_LIST_GET_PREV(LRU, bpage)) {
@@ -1171,8 +1166,8 @@ static void buf_LRU_old_init()
bpage->old = TRUE;
}
- buf_pool->LRU_old = UT_LIST_GET_FIRST(buf_pool->LRU);
- buf_pool->LRU_old_len = UT_LIST_GET_LEN(buf_pool->LRU);
+ buf_pool.LRU_old = UT_LIST_GET_FIRST(buf_pool.LRU);
+ buf_pool.LRU_old_len = UT_LIST_GET_LEN(buf_pool.LRU);
buf_LRU_old_adjust_len();
}
@@ -1182,7 +1177,7 @@ static void buf_LRU_old_init()
static void buf_unzip_LRU_remove_block_if_needed(buf_page_t* bpage)
{
ut_ad(buf_page_in_file(bpage));
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
if (buf_page_belongs_to_unzip_LRU(bpage)) {
buf_block_t* block = reinterpret_cast<buf_block_t*>(bpage);
@@ -1190,7 +1185,7 @@ static void buf_unzip_LRU_remove_block_if_needed(buf_page_t* bpage)
ut_ad(block->in_unzip_LRU_list);
ut_d(block->in_unzip_LRU_list = FALSE);
- UT_LIST_REMOVE(buf_pool->unzip_LRU, block);
+ UT_LIST_REMOVE(buf_pool.unzip_LRU, block);
}
}
@@ -1198,16 +1193,16 @@ static void buf_unzip_LRU_remove_block_if_needed(buf_page_t* bpage)
@param[in] bpage buffer page descriptor */
void buf_LRU_adjust_hp(const buf_page_t* bpage)
{
- buf_pool->lru_hp.adjust(bpage);
- buf_pool->lru_scan_itr.adjust(bpage);
- buf_pool->single_scan_itr.adjust(bpage);
+ buf_pool.lru_hp.adjust(bpage);
+ buf_pool.lru_scan_itr.adjust(bpage);
+ buf_pool.single_scan_itr.adjust(bpage);
}
/** Removes a block from the LRU list.
@param[in] bpage control block */
static inline void buf_LRU_remove_block(buf_page_t* bpage)
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_a(buf_page_in_file(bpage));
@@ -1220,12 +1215,12 @@ static inline void buf_LRU_remove_block(buf_page_t* bpage)
/* If the LRU_old pointer is defined and points to just this block,
move it backward one step */
- if (bpage == buf_pool->LRU_old) {
+ if (bpage == buf_pool.LRU_old) {
/* Below: the previous block is guaranteed to exist,
because the LRU_old pointer is only allowed to differ
by BUF_LRU_OLD_TOLERANCE from strict
- buf_pool->LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the LRU
+ buf_pool.LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the LRU
list length. */
buf_page_t* prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
@@ -1233,25 +1228,25 @@ static inline void buf_LRU_remove_block(buf_page_t* bpage)
#ifdef UNIV_LRU_DEBUG
ut_a(!prev_bpage->old);
#endif /* UNIV_LRU_DEBUG */
- buf_pool->LRU_old = prev_bpage;
+ buf_pool.LRU_old = prev_bpage;
buf_page_set_old(prev_bpage, TRUE);
- buf_pool->LRU_old_len++;
+ buf_pool.LRU_old_len++;
}
/* Remove the block from the LRU list */
- UT_LIST_REMOVE(buf_pool->LRU, bpage);
+ UT_LIST_REMOVE(buf_pool.LRU, bpage);
ut_d(bpage->in_LRU_list = FALSE);
- buf_pool->stat.LRU_bytes -= bpage->size.physical();
+ buf_pool.stat.LRU_bytes -= bpage->size.physical();
buf_unzip_LRU_remove_block_if_needed(bpage);
/* If the LRU list is so short that LRU_old is not defined,
clear the "old" flags and return */
- if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN) {
+ if (UT_LIST_GET_LEN(buf_pool.LRU) < BUF_LRU_OLD_MIN_LEN) {
- for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
+ for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool.LRU);
bpage != NULL;
bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
@@ -1260,18 +1255,18 @@ static inline void buf_LRU_remove_block(buf_page_t* bpage)
bpage->old = FALSE;
}
- buf_pool->LRU_old = NULL;
- buf_pool->LRU_old_len = 0;
+ buf_pool.LRU_old = NULL;
+ buf_pool.LRU_old_len = 0;
return;
}
- ut_ad(buf_pool->LRU_old);
+ ut_ad(buf_pool.LRU_old);
/* Update the LRU_old_len field if necessary */
if (buf_page_is_old(bpage)) {
- buf_pool->LRU_old_len--;
+ buf_pool.LRU_old_len--;
}
/* Adjust the length of the old block list if necessary */
@@ -1287,15 +1282,15 @@ buf_unzip_LRU_add_block(
ibool old) /*!< in: TRUE if should be put to the end
of the list, else put to the start */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
ut_ad(!block->in_unzip_LRU_list);
ut_d(block->in_unzip_LRU_list = TRUE);
if (old) {
- UT_LIST_ADD_LAST(buf_pool->unzip_LRU, block);
+ UT_LIST_ADD_LAST(buf_pool.unzip_LRU, block);
} else {
- UT_LIST_ADD_FIRST(buf_pool->unzip_LRU, block);
+ UT_LIST_ADD_FIRST(buf_pool.unzip_LRU, block);
}
}
@@ -1313,52 +1308,52 @@ buf_LRU_add_block_low(
LRU list is very short, the block is added to
the start, regardless of this parameter */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_a(buf_page_in_file(bpage));
ut_ad(!bpage->in_LRU_list);
- if (!old || (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN)) {
+ if (!old || (UT_LIST_GET_LEN(buf_pool.LRU) < BUF_LRU_OLD_MIN_LEN)) {
- UT_LIST_ADD_FIRST(buf_pool->LRU, bpage);
+ UT_LIST_ADD_FIRST(buf_pool.LRU, bpage);
- bpage->freed_page_clock = buf_pool->freed_page_clock;
+ bpage->freed_page_clock = buf_pool.freed_page_clock;
} else {
#ifdef UNIV_LRU_DEBUG
- /* buf_pool->LRU_old must be the first item in the LRU list
+ /* buf_pool.LRU_old must be the first item in the LRU list
whose "old" flag is set. */
- ut_a(buf_pool->LRU_old->old);
- ut_a(!UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)
- || !UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)->old);
- ut_a(!UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)
- || UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)->old);
+ ut_a(buf_pool.LRU_old->old);
+ ut_a(!UT_LIST_GET_PREV(LRU, buf_pool.LRU_old)
+ || !UT_LIST_GET_PREV(LRU, buf_pool.LRU_old)->old);
+ ut_a(!UT_LIST_GET_NEXT(LRU, buf_pool.LRU_old)
+ || UT_LIST_GET_NEXT(LRU, buf_pool.LRU_old)->old);
#endif /* UNIV_LRU_DEBUG */
- UT_LIST_INSERT_AFTER(buf_pool->LRU, buf_pool->LRU_old,
+ UT_LIST_INSERT_AFTER(buf_pool.LRU, buf_pool.LRU_old,
bpage);
- buf_pool->LRU_old_len++;
+ buf_pool.LRU_old_len++;
}
ut_d(bpage->in_LRU_list = TRUE);
incr_LRU_size_in_bytes(bpage);
- if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) {
+ if (UT_LIST_GET_LEN(buf_pool.LRU) > BUF_LRU_OLD_MIN_LEN) {
- ut_ad(buf_pool->LRU_old);
+ ut_ad(buf_pool.LRU_old);
/* Adjust the length of the old block list if necessary */
buf_page_set_old(bpage, old);
buf_LRU_old_adjust_len();
- } else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) {
+ } else if (UT_LIST_GET_LEN(buf_pool.LRU) == BUF_LRU_OLD_MIN_LEN) {
/* The LRU list is now long enough for LRU_old to become
defined: init it */
buf_LRU_old_init();
} else {
- buf_page_set_old(bpage, buf_pool->LRU_old != NULL);
+ buf_page_set_old(bpage, buf_pool.LRU_old != NULL);
}
/* If this is a zipped block with decompressed frame as well
@@ -1392,10 +1387,10 @@ buf_LRU_make_block_young(
/*=====================*/
buf_page_t* bpage) /*!< in: control block */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
if (bpage->old) {
- buf_pool->stat.n_pages_made_young++;
+ buf_pool.stat.n_pages_made_young++;
}
buf_LRU_remove_block(bpage);
@@ -1407,10 +1402,10 @@ Try to free a block. If bpage is a descriptor of a compressed-only
page, the descriptor object will be freed as well.
NOTE: If this function returns true, it will temporarily
-release buf_pool->mutex. Furthermore, the page frame will no longer be
+release buf_pool.mutex. Furthermore, the page frame will no longer be
accessible via bpage.
-The caller must hold buf_pool->mutex and must not hold any
+The caller must hold buf_pool.mutex and must not hold any
buf_page_get_mutex() when calling this function.
@return true if freed, false otherwise. */
bool
@@ -1424,7 +1419,7 @@ buf_LRU_free_page(
rw_lock_t* hash_lock = buf_page_hash_lock_get(bpage->id);
BPageMutex* block_mutex = buf_page_get_mutex(bpage);
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_page_in_file(bpage));
ut_ad(bpage->in_LRU_list);
@@ -1464,7 +1459,7 @@ func_exit:
memcpy(b, bpage, sizeof *b);
}
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_page_in_file(bpage));
ut_ad(bpage->in_LRU_list);
ut_ad(!bpage->in_flush_list == !bpage->oldest_modification);
@@ -1525,7 +1520,7 @@ func_exit:
ut_ad(b->in_page_hash);
ut_ad(b->in_LRU_list);
- HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
+ HASH_INSERT(buf_page_t, hash, buf_pool.page_hash,
b->id.fold(), b);
/* Insert b where bpage was in the LRU list. */
@@ -1535,23 +1530,23 @@ func_exit:
ut_ad(prev_b->in_LRU_list);
ut_ad(buf_page_in_file(prev_b));
- UT_LIST_INSERT_AFTER(buf_pool->LRU, prev_b, b);
+ UT_LIST_INSERT_AFTER(buf_pool.LRU, prev_b, b);
incr_LRU_size_in_bytes(b);
if (buf_page_is_old(b)) {
- buf_pool->LRU_old_len++;
- if (buf_pool->LRU_old
+ buf_pool.LRU_old_len++;
+ if (buf_pool.LRU_old
== UT_LIST_GET_NEXT(LRU, b)) {
- buf_pool->LRU_old = b;
+ buf_pool.LRU_old = b;
}
}
- lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
+ lru_len = UT_LIST_GET_LEN(buf_pool.LRU);
if (lru_len > BUF_LRU_OLD_MIN_LEN) {
- ut_ad(buf_pool->LRU_old);
+ ut_ad(buf_pool.LRU_old);
/* Adjust the length of the
old block list if necessary */
buf_LRU_old_adjust_len();
@@ -1576,7 +1571,7 @@ func_exit:
buf_LRU_insert_zip_clean(b);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
} else {
- /* Relocate on buf_pool->flush_list. */
+ /* Relocate on buf_pool.flush_list. */
buf_flush_relocate_on_flush_list(bpage, b);
}
@@ -1592,7 +1587,7 @@ func_exit:
/* Prevent buf_page_get_gen() from
decompressing the block while we release
- buf_pool->mutex and block_mutex. */
+ buf_pool.mutex and block_mutex. */
block_mutex = buf_page_get_mutex(b);
mutex_enter(block_mutex);
@@ -1604,7 +1599,7 @@ func_exit:
rw_lock_x_unlock(hash_lock);
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* Remove possible adaptive hash index on the page.
The page was declared uninitialized by
@@ -1624,7 +1619,7 @@ func_exit:
checksum while not holding any mutex. The
block is already half-freed
(BUF_BLOCK_REMOVE_HASH) and removed from
- buf_pool->page_hash, thus inaccessible by any
+ buf_pool.page_hash, thus inaccessible by any
other thread. */
ut_ad(b->size.is_compressed());
@@ -1639,7 +1634,7 @@ func_exit:
checksum);
}
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
if (b != NULL) {
mutex_enter(block_mutex);
@@ -1663,7 +1658,7 @@ buf_LRU_block_free_non_file_page(
{
void* data;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_page_mutex_own(block));
switch (buf_block_get_state(block)) {
@@ -1712,16 +1707,16 @@ buf_LRU_block_free_non_file_page(
false));
}
- if (buf_pool->curr_size < buf_pool->old_size
- && UT_LIST_GET_LEN(buf_pool->withdraw) < buf_pool->withdraw_target
+ if (buf_pool.curr_size < buf_pool.old_size
+ && UT_LIST_GET_LEN(buf_pool.withdraw) < buf_pool.withdraw_target
&& buf_block_will_be_withdrawn(block)) {
/* This should be withdrawn */
UT_LIST_ADD_LAST(
- buf_pool->withdraw,
+ buf_pool.withdraw,
&block->page);
ut_d(block->in_withdraw_list = TRUE);
} else {
- UT_LIST_ADD_FIRST(buf_pool->free, &block->page);
+ UT_LIST_ADD_FIRST(buf_pool.free, &block->page);
ut_d(block->page.in_free_list = TRUE);
}
@@ -1733,7 +1728,7 @@ Takes a block out of the LRU list and page hash table.
If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
the object will be freed.
-The caller must hold buf_pool->mutex, the buf_page_get_mutex() mutex
+The caller must hold buf_pool.mutex, the buf_page_get_mutex() mutex
and the appropriate hash_lock. This function will release the
buf_page_get_mutex() and the hash_lock.
@@ -1755,7 +1750,7 @@ buf_LRU_block_remove_hashed(
const buf_page_t* hashed_bpage;
rw_lock_t* hash_lock;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
hash_lock = buf_page_hash_lock_get(bpage->id);
@@ -1767,7 +1762,7 @@ buf_LRU_block_remove_hashed(
buf_LRU_remove_block(bpage);
- buf_pool->freed_page_clock += 1;
+ buf_pool.freed_page_clock += 1;
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_FILE_PAGE:
@@ -1868,7 +1863,7 @@ buf_LRU_block_remove_hashed(
ut_d(mutex_exit(buf_page_get_mutex(bpage)));
ut_d(rw_lock_x_unlock(hash_lock));
- ut_d(mutex_exit(&buf_pool->mutex));
+ ut_d(mutex_exit(&buf_pool.mutex));
ut_d(buf_print());
ut_d(buf_LRU_print());
ut_d(buf_validate());
@@ -1880,7 +1875,7 @@ buf_LRU_block_remove_hashed(
ut_ad(bpage->in_page_hash);
ut_d(bpage->in_page_hash = FALSE);
- HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, bpage->id.fold(),
+ HASH_DELETE(buf_page_t, hash, buf_pool.page_hash, bpage->id.fold(),
bpage);
switch (buf_page_get_state(bpage)) {
@@ -1891,9 +1886,9 @@ buf_LRU_block_remove_hashed(
ut_a(bpage->zip.data);
ut_a(bpage->size.is_compressed());
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
- UT_LIST_REMOVE(buf_pool->zip_clean, bpage);
+ UT_LIST_REMOVE(buf_pool.zip_clean, bpage);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
- mutex_exit(&buf_pool->zip_mutex);
+ mutex_exit(&buf_pool.zip_mutex);
rw_lock_x_unlock(hash_lock);
buf_pool_mutex_exit_forbid();
@@ -1980,11 +1975,11 @@ buf_LRU_block_free_hashed_page(
buf_block_t* block) /*!< in: block, must contain a file page and
be in a state where it can be freed */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
buf_page_mutex_enter(block);
- if (buf_pool->flush_rbt == NULL) {
+ if (buf_pool.flush_rbt == NULL) {
block->page.id.reset();
}
@@ -2006,7 +2001,7 @@ buf_LRU_free_one_page(
rw_lock_t* hash_lock = buf_page_hash_lock_get(bpage->id);
BPageMutex* block_mutex = buf_page_get_mutex(bpage);
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
rw_lock_x_lock(hash_lock);
mutex_enter(block_mutex);
@@ -2022,11 +2017,11 @@ buf_LRU_free_one_page(
ut_ad(!mutex_own(block_mutex));
}
-/** Update buf_pool->LRU_old_ratio.
+/** Update buf_pool.LRU_old_ratio.
@param[in] old_pct Reserve this percentage of
the buffer pool for "old" blocks
@param[in] adjust true=adjust the LRU list;
- false=just assign buf_pool->LRU_old_ratio
+ false=just assign buf_pool.LRU_old_ratio
during the initialization of InnoDB
@return updated old_pct */
uint buf_LRU_old_ratio_update(uint old_pct, bool adjust)
@@ -2039,20 +2034,20 @@ uint buf_LRU_old_ratio_update(uint old_pct, bool adjust)
}
if (adjust) {
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- if (ratio != buf_pool->LRU_old_ratio) {
- buf_pool->LRU_old_ratio = ratio;
+ if (ratio != buf_pool.LRU_old_ratio) {
+ buf_pool.LRU_old_ratio = ratio;
- if (UT_LIST_GET_LEN(buf_pool->LRU)
+ if (UT_LIST_GET_LEN(buf_pool.LRU)
>= BUF_LRU_OLD_MIN_LEN) {
buf_LRU_old_adjust_len();
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
} else {
- buf_pool->LRU_old_ratio = ratio;
+ buf_pool.LRU_old_ratio = ratio;
}
/* the reverse of
ratio = old_pct * BUF_LRU_OLD_RATIO_DIV / 100 */
@@ -2068,7 +2063,7 @@ buf_LRU_stat_update()
buf_LRU_stat_t* item;
buf_LRU_stat_t cur_stat;
- if (!buf_pool->freed_page_clock) {
+ if (!buf_pool.freed_page_clock) {
goto func_exit;
}
@@ -2103,17 +2098,17 @@ void buf_LRU_validate()
ulint old_len;
ulint new_len;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) {
+ if (UT_LIST_GET_LEN(buf_pool.LRU) >= BUF_LRU_OLD_MIN_LEN) {
- ut_a(buf_pool->LRU_old);
- old_len = buf_pool->LRU_old_len;
+ ut_a(buf_pool.LRU_old);
+ old_len = buf_pool.LRU_old_len;
- new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
- * buf_pool->LRU_old_ratio
+ new_len = ut_min(UT_LIST_GET_LEN(buf_pool.LRU)
+ * buf_pool.LRU_old_ratio
/ BUF_LRU_OLD_RATIO_DIV,
- UT_LIST_GET_LEN(buf_pool->LRU)
+ UT_LIST_GET_LEN(buf_pool.LRU)
- (BUF_LRU_OLD_TOLERANCE
+ BUF_LRU_NON_OLD_MIN_LEN));
@@ -2125,7 +2120,7 @@ void buf_LRU_validate()
old_len = 0;
- for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
+ for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool.LRU);
bpage != NULL;
bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
@@ -2152,7 +2147,7 @@ void buf_LRU_validate()
= UT_LIST_GET_NEXT(LRU, bpage);
if (!old_len++) {
- ut_a(buf_pool->LRU_old == bpage);
+ ut_a(buf_pool.LRU_old == bpage);
} else {
ut_a(!prev || buf_page_is_old(prev));
}
@@ -2161,11 +2156,11 @@ void buf_LRU_validate()
}
}
- ut_a(buf_pool->LRU_old_len == old_len);
+ ut_a(buf_pool.LRU_old_len == old_len);
CheckInFreeList::validate();
- for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool->free);
+ for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool.free);
bpage != NULL;
bpage = UT_LIST_GET_NEXT(list, bpage)) {
@@ -2174,7 +2169,7 @@ void buf_LRU_validate()
CheckUnzipLRUAndLRUList::validate();
- for (buf_block_t* block = UT_LIST_GET_FIRST(buf_pool->unzip_LRU);
+ for (buf_block_t* block = UT_LIST_GET_FIRST(buf_pool.unzip_LRU);
block != NULL;
block = UT_LIST_GET_NEXT(unzip_LRU, block)) {
@@ -2183,7 +2178,7 @@ void buf_LRU_validate()
ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
@@ -2191,9 +2186,9 @@ void buf_LRU_validate()
/** Dump the LRU list to stderr. */
void buf_LRU_print()
{
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- for (const buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
+ for (const buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool.LRU);
bpage != NULL;
bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
@@ -2246,6 +2241,6 @@ void buf_LRU_print()
mutex_exit(buf_page_get_mutex(bpage));
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
#endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */
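
The buf_LRU_old_adjust_len() and buf_LRU_validate() hunks above compute the same target for the length of the "old" sublist: LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the whole LRU list, capped so that at least a few "new" blocks plus the tolerance remain. A small numeric sketch of that target follows; BUF_LRU_OLD_TOLERANCE = 20 is visible in the diff, while the other constant values are assumptions based on typical InnoDB defaults.

#include <algorithm>
#include <cstddef>

static const size_t BUF_LRU_OLD_RATIO_DIV   = 1024;	/* assumed denominator */
static const size_t BUF_LRU_OLD_TOLERANCE   = 20;	/* from the diff above */
static const size_t BUF_LRU_NON_OLD_MIN_LEN = 5;	/* assumed minimum "new" blocks */

/* Target length of the old sublist; only meaningful once the LRU list has
reached BUF_LRU_OLD_MIN_LEN blocks, as in buf_LRU_old_adjust_len(). */
size_t lru_old_target(size_t lru_len, size_t lru_old_ratio)
{
	return std::min(lru_len * lru_old_ratio / BUF_LRU_OLD_RATIO_DIV,
			lru_len - (BUF_LRU_OLD_TOLERANCE
				   + BUF_LRU_NON_OLD_MIN_LEN));
}

/* The adjust loop only moves the LRU_old pointer when the drift from the
target exceeds the tolerance, one block at a time. */
bool lru_old_needs_adjustment(size_t old_len, size_t target)
{
	return old_len + BUF_LRU_OLD_TOLERANCE < target
	       || old_len > target + BUF_LRU_OLD_TOLERANCE;
}
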
diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc
index 72f1204b276..bf67a8513d7 100644
--- a/storage/innobase/buf/buf0rea.cc
+++ b/storage/innobase/buf/buf0rea.cc
@@ -41,7 +41,7 @@ Created 11/5/1995 Heikki Tuuri
#include "srv0start.h"
#include "srv0srv.h"
-/** If there are buf_pool->curr_size per the number below pending reads, then
+/** If there are buf_pool.curr_size per the number below pending reads, then
read-ahead is not done: this is to prevent flooding the buffer pool with
i/o-fixed buffer blocks */
#define BUF_READ_AHEAD_PEND_LIMIT 2
@@ -59,7 +59,7 @@ buf_read_page_handle_error(
== BUF_BLOCK_FILE_PAGE);
/* First unfix and release lock on the bpage */
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
rw_lock_t* hash_lock = buf_page_hash_lock_get(bpage->id);
rw_lock_x_lock(hash_lock);
mutex_enter(buf_page_get_mutex(bpage));
@@ -80,10 +80,11 @@ buf_read_page_handle_error(
/* remove the block from LRU list */
buf_LRU_free_one_page(bpage);
- ut_ad(buf_pool->n_pend_reads > 0);
- buf_pool->n_pend_reads--;
+ /* FIXME: use atomics */
+ ut_ad(buf_pool.n_pend_reads > 0);
+ buf_pool.n_pend_reads--;
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
/** Low-level function which reads a page asynchronously from a file to the
@@ -277,7 +278,7 @@ buf_read_ahead_random(
}
const ulint buf_read_ahead_random_area
- = buf_pool->read_ahead_area;
+ = buf_pool.read_ahead_area;
low = (page_id.page_no() / buf_read_ahead_random_area)
* buf_read_ahead_random_area;
@@ -314,12 +315,11 @@ buf_read_ahead_random(
return(0);
}
- mutex_enter(&buf_pool->mutex);
-
- if (buf_pool->n_pend_reads
- > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
- mutex_exit(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
+ if (buf_pool.n_pend_reads
+ > buf_pool.curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
+ mutex_exit(&buf_pool.mutex);
return(0);
}
@@ -335,7 +335,7 @@ buf_read_ahead_random(
space->release();
if (skip) {
high = space->size;
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
goto read_ahead;
}
});
@@ -345,14 +345,14 @@ buf_read_ahead_random(
if (buf_page_is_accessed(bpage)
&& buf_page_peek_if_young(bpage)
&& ++recent_blocks
- >= 5 + buf_pool->read_ahead_area / 8) {
- mutex_exit(&buf_pool->mutex);
+ >= 5 + buf_pool.read_ahead_area / 8) {
+ mutex_exit(&buf_pool.mutex);
goto read_ahead;
}
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* Do nothing */
return(0);
@@ -414,7 +414,7 @@ read_ahead:
LRU policy decision. */
buf_LRU_stat_inc_io();
- buf_pool->stat.n_ra_pages_read_rnd += count;
+ buf_pool.stat.n_ra_pages_read_rnd += count;
srv_stats.buf_pool_reads.add(count);
return(count);
}
@@ -570,7 +570,7 @@ buf_read_ahead_linear(
}
const ulint buf_read_ahead_linear_area
- = buf_pool->read_ahead_area;
+ = buf_pool.read_ahead_area;
low = (page_id.page_no() / buf_read_ahead_linear_area)
* buf_read_ahead_linear_area;
high = (page_id.page_no() / buf_read_ahead_linear_area + 1)
@@ -608,11 +608,10 @@ buf_read_ahead_linear(
return(0);
}
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- if (buf_pool->n_pend_reads
- > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
- mutex_exit(&buf_pool->mutex);
+ if (buf_pool.n_pend_reads
+ > buf_pool.curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
return(0);
}
@@ -630,7 +629,7 @@ buf_read_ahead_linear(
/* How many out of order accessed pages can we ignore
when working out the access pattern for linear readahead */
threshold = ut_min(static_cast<ulint>(64 - srv_read_ahead_threshold),
- buf_pool->read_ahead_area);
+ buf_pool.read_ahead_area);
fail_count = 0;
@@ -661,7 +660,7 @@ buf_read_ahead_linear(
if (fail_count > threshold) {
/* Too many failures: return */
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(0);
}
@@ -676,7 +675,7 @@ buf_read_ahead_linear(
bpage = buf_page_hash_get(page_id);
if (bpage == NULL) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(0);
}
@@ -702,7 +701,7 @@ buf_read_ahead_linear(
pred_offset = fil_page_get_prev(frame);
succ_offset = fil_page_get_next(frame);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if ((page_id.page_no() == low)
&& (succ_offset == page_id.page_no() + 1)) {
@@ -800,7 +799,7 @@ buf_read_ahead_linear(
LRU policy decision. */
buf_LRU_stat_inc_io();
- buf_pool->stat.n_ra_pages_read += count;
+ buf_pool.stat.n_ra_pages_read += count;
return(count);
}
@@ -847,8 +846,8 @@ tablespace_deleted:
const page_id_t page_id(space_ids[i], page_nos[i]);
- while (buf_pool->n_pend_reads
- > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
+ while (buf_pool.n_pend_reads
+ > buf_pool.curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
os_thread_sleep(500000);
}
@@ -916,7 +915,7 @@ buf_read_recv_pages(
ulint count = 0;
- while (buf_pool->n_pend_reads >= recv_n_pool_free_frames / 2) {
+ while (buf_pool.n_pend_reads >= recv_n_pool_free_frames / 2) {
os_aio_simulated_wake_handler_threads();
os_thread_sleep(10000);
@@ -928,7 +927,7 @@ buf_read_recv_pages(
ib::error()
<< "Waited for " << count / 100
<< " seconds for "
- << buf_pool->n_pend_reads
+ << buf_pool.n_pend_reads
<< " pending reads";
}
}
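
Every hunk in buf0rea.cc is the same mechanical rewrite: buf_pool used to be a pointer to a heap-allocated pool and is now a statically allocated object, so each buf_pool->member becomes buf_pool.member. As a minimal sketch (all names as in the patch, surrounding code elided), the pending-read throttle that both read-ahead paths perform now reads:

	/* Sketch only: skip read-ahead when too many reads are pending. */
	mutex_enter(&buf_pool.mutex);
	if (buf_pool.n_pend_reads
	    > buf_pool.curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
		mutex_exit(&buf_pool.mutex);
		return(0);	/* the caller treats 0 as "no pages read ahead" */
	}
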
diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc
index e46cb0d7cf1..0aa0769664f 100644
--- a/storage/innobase/fsp/fsp0fsp.cc
+++ b/storage/innobase/fsp/fsp0fsp.cc
@@ -1257,7 +1257,7 @@ fsp_page_create(
ut_ad(rw_latch == RW_X_LATCH || rw_latch == RW_SX_LATCH);
- /* Mimic buf_page_get(), but avoid the buf_pool->page_hash lookup. */
+ /* Mimic buf_page_get(), but avoid the buf_pool.page_hash lookup. */
if (rw_latch == RW_X_LATCH) {
rw_lock_x_lock(&block->lock);
} else {
diff --git a/storage/innobase/ha/ha0ha.cc b/storage/innobase/ha/ha0ha.cc
index fa1a9bc5db9..501823d6364 100644
--- a/storage/innobase/ha/ha0ha.cc
+++ b/storage/innobase/ha/ha0ha.cc
@@ -72,7 +72,7 @@ ib_create(
if (type == MEM_HEAP_FOR_PAGE_HASH) {
/* We create a hash table protected by rw_locks for
- buf_pool->page_hash. */
+ buf_pool.page_hash. */
hash_create_sync_obj(
table, HASH_TABLE_SYNC_RW_LOCK, id, n_sync_obj);
} else {
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 3631768d55d..a95acd9a750 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -18022,9 +18022,9 @@ innodb_buffer_pool_evict_uncompressed()
{
bool all_evicted = true;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- for (buf_block_t* block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
+ for (buf_block_t* block = UT_LIST_GET_LAST(buf_pool.unzip_LRU);
block != NULL; ) {
buf_block_t* prev_block = UT_LIST_GET_PREV(unzip_LRU, block);
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
@@ -18039,7 +18039,7 @@ innodb_buffer_pool_evict_uncompressed()
block = prev_block;
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(all_evicted);
}
@@ -21076,10 +21076,10 @@ innodb_buffer_pool_size_validate(
#endif /* UNIV_DEBUG */
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
if (srv_buf_pool_old_size != srv_buf_pool_size) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
my_printf_error(ER_WRONG_ARGUMENTS,
"Another buffer pool resize is already in progress.", MYF(0));
return(1);
@@ -21090,13 +21090,13 @@ innodb_buffer_pool_size_validate(
*static_cast<ulonglong*>(save) = requested_buf_pool_size;
if (srv_buf_pool_size == ulint(intbuf)) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* nothing to do */
return(0);
}
if (srv_buf_pool_size == requested_buf_pool_size) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WRONG_ARGUMENTS,
"innodb_buffer_pool_size must be at least"
@@ -21107,7 +21107,7 @@ innodb_buffer_pool_size_validate(
}
srv_buf_pool_size = requested_buf_pool_size;
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if (intbuf != static_cast<longlong>(requested_buf_pool_size)) {
char buf[64];
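
The innodb_buffer_pool_evict_uncompressed() hunk above shows the usual pattern for walking a buffer-pool list while holding buf_pool.mutex. Condensed sketch; the eviction call itself is outside the excerpt and only indicated by a comment here:

	mutex_enter(&buf_pool.mutex);
	for (buf_block_t* block = UT_LIST_GET_LAST(buf_pool.unzip_LRU);
	     block != NULL; ) {
		/* Remember the predecessor first: a successful eviction
		unlinks 'block' from the unzip_LRU list. */
		buf_block_t*	prev_block = UT_LIST_GET_PREV(unzip_LRU, block);
		ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
		/* try to evict the uncompressed copy of the page;
		on failure, remember all_evicted = false */
		block = prev_block;
	}
	mutex_exit(&buf_pool.mutex);
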
diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc
index 0e8893aeaa9..b76fd30d9cc 100644
--- a/storage/innobase/handler/i_s.cc
+++ b/storage/innobase/handler/i_s.cc
@@ -126,9 +126,9 @@ struct buf_page_info_t{
built on this page */
#endif /* BTR_CUR_HASH_ADAPT */
unsigned is_old:1; /*!< TRUE if the block is in the old
- blocks in buf_pool->LRU_old */
+ blocks in buf_pool.LRU_old */
unsigned freed_page_clock:31; /*!< the value of
- buf_pool->freed_page_clock */
+ buf_pool.freed_page_clock */
unsigned zip_ssize:PAGE_ZIP_SSIZE_BITS;
/*!< Compressed page size */
unsigned page_state:BUF_PAGE_STATE_BITS; /*!< Page state */
@@ -1962,22 +1962,22 @@ i_s_cmpmem_fill_low(
buf_buddy_stat_t buddy_stat_local[BUF_BUDDY_SIZES_MAX + 1];
/* Save buddy stats for buffer pool in local variables. */
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
for (uint x = 0; x <= BUF_BUDDY_SIZES; x++) {
zip_free_len_local[x] = (x < BUF_BUDDY_SIZES) ?
- UT_LIST_GET_LEN(buf_pool->zip_free[x]) : 0;
+ UT_LIST_GET_LEN(buf_pool.zip_free[x]) : 0;
- buddy_stat_local[x] = buf_pool->buddy_stat[x];
+ buddy_stat_local[x] = buf_pool.buddy_stat[x];
if (reset) {
- /* This is protected by buf_pool->mutex. */
- buf_pool->buddy_stat[x].relocated = 0;
- buf_pool->buddy_stat[x].relocated_usec = 0;
+ /* This is protected by buf_pool.mutex. */
+ buf_pool.buddy_stat[x].relocated = 0;
+ buf_pool.buddy_stat[x].relocated_usec = 0;
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
for (uint x = 0; x <= BUF_BUDDY_SIZES; x++) {
buf_buddy_stat_t* buddy_stat;
@@ -5035,7 +5035,7 @@ static int i_s_innodb_buffer_page_fill(THD *thd, TABLE_LIST *tables, Item *)
heap = mem_heap_create(10000);
for (ulint n = 0;
- n < ut_min(buf_pool->n_chunks, buf_pool->n_chunks_new); n++) {
+ n < ut_min(buf_pool.n_chunks, buf_pool.n_chunks_new); n++) {
const buf_block_t* block;
ulint n_blocks;
buf_page_info_t* info_buffer;
@@ -5046,8 +5046,8 @@ static int i_s_innodb_buffer_page_fill(THD *thd, TABLE_LIST *tables, Item *)
ulint block_id = 0;
/* Get buffer block of the nth chunk */
- block = buf_pool->chunks[n].blocks;
- chunk_size = buf_pool->chunks[n].size;
+ block = buf_pool.chunks[n].blocks;
+ chunk_size = buf_pool.chunks[n].size;
num_page = 0;
while (chunk_size > 0) {
@@ -5068,7 +5068,7 @@ static int i_s_innodb_buffer_page_fill(THD *thd, TABLE_LIST *tables, Item *)
buffer pool info printout, we are not required to
preserve the overall consistency, so we can
release mutex periodically */
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
/* GO through each block in the chunk */
for (n_blocks = num_to_process; n_blocks--; block++) {
@@ -5079,7 +5079,7 @@ static int i_s_innodb_buffer_page_fill(THD *thd, TABLE_LIST *tables, Item *)
num_page++;
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* Fill in information schema table with information
just collected from the buffer chunk scan */
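
The chunk scan above deliberately holds buf_pool.mutex only for a bounded batch of blocks at a time, so that an INFORMATION_SCHEMA query never blocks the whole pool for an entire chunk. A hedged outline of that loop; batch_cap is a stand-in for the actual bound, which lies outside the excerpt:

	for (ulint n = 0;
	     n < ut_min(buf_pool.n_chunks, buf_pool.n_chunks_new); n++) {
		const buf_block_t*	block = buf_pool.chunks[n].blocks;
		ulint			chunk_size = buf_pool.chunks[n].size;

		while (chunk_size > 0) {
			/* Cap the batch so the mutex is never held for an
			unbounded scan (batch_cap is hypothetical here). */
			const ulint	num_to_process
				= ut_min(chunk_size, batch_cap);

			mutex_enter(&buf_pool.mutex);
			for (ulint i = num_to_process; i--; block++) {
				/* copy this block's metadata into the
				local info_buffer */
			}
			mutex_exit(&buf_pool.mutex);

			chunk_size -= num_to_process;
		}
	}
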
@@ -5551,10 +5551,10 @@ i_s_innodb_fill_buffer_lru(THD *thd, TABLE_LIST *tables, Item*)
}
/* Acquire the mutex before allocating info_buffer, since
- UT_LIST_GET_LEN(buf_pool->LRU) could change */
- mutex_enter(&buf_pool->mutex);
+ UT_LIST_GET_LEN(buf_pool.LRU) could change */
+ mutex_enter(&buf_pool.mutex);
- lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
+ lru_len = UT_LIST_GET_LEN(buf_pool.LRU);
/* Print error message if malloc fails */
info_buffer = (buf_page_info_t*) my_malloc(
@@ -5573,7 +5573,7 @@ i_s_innodb_fill_buffer_lru(THD *thd, TABLE_LIST *tables, Item*)
/* Walk through Pool's LRU list and print the buffer page
information */
- bpage = UT_LIST_GET_LAST(buf_pool->LRU);
+ bpage = UT_LIST_GET_LAST(buf_pool.LRU);
while (bpage != NULL) {
/* Use the same function that collect buffer info for
@@ -5587,10 +5587,10 @@ i_s_innodb_fill_buffer_lru(THD *thd, TABLE_LIST *tables, Item*)
}
ut_ad(lru_pos == lru_len);
- ut_ad(lru_pos == UT_LIST_GET_LEN(buf_pool->LRU));
+ ut_ad(lru_pos == UT_LIST_GET_LEN(buf_pool.LRU));
exit:
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if (info_buffer) {
status = i_s_innodb_buf_page_lru_fill(
diff --git a/storage/innobase/include/btr0sea.h b/storage/innobase/include/btr0sea.h
index de45bc7b39f..ad4debb84f9 100644
--- a/storage/innobase/include/btr0sea.h
+++ b/storage/innobase/include/btr0sea.h
@@ -115,7 +115,7 @@ btr_search_move_or_delete_hash_entries(
@param[in,out] block block containing index page, s- or x-latched, or an
index page for which we know that
block->buf_fix_count == 0 or it is an index page which
- has already been removed from the buf_pool->page_hash
+ has already been removed from the buf_pool.page_hash
i.e.: it is in state BUF_BLOCK_REMOVE_HASH */
void btr_search_drop_page_hash_index(buf_block_t* block);
diff --git a/storage/innobase/include/buf0buddy.h b/storage/innobase/include/buf0buddy.h
index a05c3ab0ca7..1a22afffc9b 100644
--- a/storage/innobase/include/buf0buddy.h
+++ b/storage/innobase/include/buf0buddy.h
@@ -37,7 +37,7 @@ Created December 2006 by Marko Makela
/**
@param[in] block size in bytes
-@return index of buf_pool->zip_free[], or BUF_BUDDY_SIZES */
+@return index of buf_pool.zip_free[], or BUF_BUDDY_SIZES */
inline
ulint
buf_buddy_get_slot(ulint size)
@@ -56,16 +56,16 @@ buf_buddy_get_slot(ulint size)
}
/** Allocate a ROW_FORMAT=COMPRESSED block.
-@param[in] i index of buf_pool->zip_free[] or BUF_BUDDY_SIZES
-@param[out] lru whether buf_pool->mutex was temporarily released
+@param[in] i index of buf_pool.zip_free[] or BUF_BUDDY_SIZES
+@param[out] lru whether buf_pool.mutex was temporarily released
@return allocated block, never NULL */
byte* buf_buddy_alloc_low(ulint i, bool& lru) MY_ATTRIBUTE((malloc));
/** Allocate a ROW_FORMAT=COMPRESSED block.
-The caller must not hold buf_pool->mutex nor buf_pool->zip_mutex nor any
+The caller must not hold buf_pool.mutex nor buf_pool.zip_mutex nor any
block->mutex.
@param[in] size compressed page size
-@param[out] lru whether buf_pool->mutex was temporarily released
+@param[out] lru whether buf_pool.mutex was temporarily released
@return allocated block, never NULL */
inline byte* buf_buddy_alloc(ulint size, bool& lru)
{
@@ -75,7 +75,7 @@ inline byte* buf_buddy_alloc(ulint size, bool& lru)
/** Deallocate a block.
@param[in] buf block to be freed, must not be pointed to
by the buffer pool
-@param[in] i index of buf_pool->zip_free[], or BUF_BUDDY_SIZES */
+@param[in] i index of buf_pool.zip_free[], or BUF_BUDDY_SIZES */
void buf_buddy_free_low(void* buf, ulint i);
/** Deallocate a block.
diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index 3064865da98..35c5192aafe 100644
--- a/storage/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
@@ -85,9 +85,6 @@ struct fil_addr_t;
#define MAX_PAGE_HASH_LOCKS 1024 /*!< The maximum number of
page_hash locks */
-extern buf_pool_t* buf_pool; /*!< The buffer pools
- of the database */
-
extern volatile bool buf_pool_withdrawing; /*!< true when withdrawing buffer
pool pages might cause page relocation */
@@ -106,12 +103,12 @@ extern my_bool buf_disable_resize_buffer_pool_debug; /*!< if TRUE, resizing
The enumeration values must be 0..7. */
enum buf_page_state {
BUF_BLOCK_POOL_WATCH, /*!< a sentinel for the buffer pool
- watch, element of buf_pool->watch[] */
+ watch, element of buf_pool.watch[] */
BUF_BLOCK_ZIP_PAGE, /*!< contains a clean
compressed page */
BUF_BLOCK_ZIP_DIRTY, /*!< contains a compressed
page that is in the
- buf_pool->flush_list */
+ buf_pool.flush_list */
BUF_BLOCK_NOT_USED, /*!< is in the free list;
must be after the BUF_BLOCK_ZIP_
@@ -133,13 +130,13 @@ struct buf_pool_info_t
{
/* General buffer pool info */
ulint pool_size; /*!< Buffer Pool size in pages */
- ulint lru_len; /*!< Length of buf_pool->LRU */
- ulint old_lru_len; /*!< buf_pool->LRU_old_len */
- ulint free_list_len; /*!< Length of buf_pool->free list */
- ulint flush_list_len; /*!< Length of buf_pool->flush_list */
- ulint n_pend_unzip; /*!< buf_pool->n_pend_unzip, pages
+ ulint lru_len; /*!< Length of buf_pool.LRU */
+ ulint old_lru_len; /*!< buf_pool.LRU_old_len */
+ ulint free_list_len; /*!< Length of buf_pool.free list */
+ ulint flush_list_len; /*!< Length of buf_pool.flush_list */
+ ulint n_pend_unzip; /*!< buf_pool.n_pend_unzip, pages
pending decompress */
- ulint n_pend_reads; /*!< buf_pool->n_pend_reads, pages
+ ulint n_pend_reads; /*!< buf_pool.n_pend_reads, pages
pending read */
ulint n_pending_flush_lru; /*!< Pages pending flush in LRU */
ulint n_pending_flush_single_page;/*!< Pages pending to be
@@ -150,15 +147,15 @@ struct buf_pool_info_t
LIST */
ulint n_pages_made_young; /*!< number of pages made young */
ulint n_pages_not_made_young; /*!< number of pages not made young */
- ulint n_pages_read; /*!< buf_pool->n_pages_read */
- ulint n_pages_created; /*!< buf_pool->n_pages_created */
- ulint n_pages_written; /*!< buf_pool->n_pages_written */
- ulint n_page_gets; /*!< buf_pool->n_page_gets */
- ulint n_ra_pages_read_rnd; /*!< buf_pool->n_ra_pages_read_rnd,
+ ulint n_pages_read; /*!< buf_pool.n_pages_read */
+ ulint n_pages_created; /*!< buf_pool.n_pages_created */
+ ulint n_pages_written; /*!< buf_pool.n_pages_written */
+ ulint n_page_gets; /*!< buf_pool.n_page_gets */
+ ulint n_ra_pages_read_rnd; /*!< buf_pool.n_ra_pages_read_rnd,
number of pages readahead */
- ulint n_ra_pages_read; /*!< buf_pool->n_ra_pages_read, number
+ ulint n_ra_pages_read; /*!< buf_pool.n_ra_pages_read, number
of pages readahead */
- ulint n_ra_pages_evicted; /*!< buf_pool->n_ra_pages_evicted,
+ ulint n_ra_pages_evicted; /*!< buf_pool.n_ra_pages_evicted,
number of readahead pages evicted
without access */
ulint n_page_get_delta; /*!< num of buffer pool page gets since
@@ -188,7 +185,7 @@ struct buf_pool_info_t
without access, in pages per second */
/* Stats about LRU eviction */
- ulint unzip_lru_len; /*!< length of buf_pool->unzip_LRU
+ ulint unzip_lru_len; /*!< length of buf_pool.unzip_LRU
list */
/* Counters for LRU policy */
ulint io_sum; /*!< buf_LRU_stat_sum.io */
@@ -313,13 +310,6 @@ operator<<(
const page_id_t& page_id);
#ifndef UNIV_INNOCHECKSUM
-/** Create the buffer pool.
-@return whether the creation failed */
-bool buf_pool_init();
-/** Free the buffer pool at shutdown.
-This must not be invoked before freeing all mutexes. */
-void buf_pool_free();
-
/** Determines if a block is intended to be withdrawn.
@param[in] block pointer to control block
@retval true if will be withdrawn */
@@ -614,7 +604,7 @@ buf_page_get_newest_modification(
page frame */
/********************************************************************//**
Increments the modify clock of a frame by 1. The caller must (1) own the
-buf_pool->mutex and block bufferfix count has to be zero, (2) or own an x-lock
+buf_pool.mutex and block bufferfix count has to be zero, (2) or own an x-lock
on the block. */
UNIV_INLINE
void
@@ -992,9 +982,9 @@ buf_block_set_io_fix(
/*=================*/
buf_block_t* block, /*!< in/out: control block */
enum buf_io_fix io_fix);/*!< in: io_fix state */
-/*********************************************************************//**
-Makes a block sticky. A sticky block implies that even after we release
-the buf_pool->mutex and the block->mutex:
+
+/** Make a block sticky. A sticky block implies that even after we release
+the buf_pool.mutex and the block->mutex:
* it cannot be removed from the flush_list
* the block descriptor cannot be relocated
* it cannot be removed from the LRU list
@@ -1338,9 +1328,9 @@ public:
/** @name General fields
None of these bit-fields must be modified without holding
buf_page_get_mutex() [buf_block_t::mutex or
- buf_pool->zip_mutex], since they can be stored in the same
+ buf_pool.zip_mutex], since they can be stored in the same
machine word. Some of these fields are additionally protected
- by buf_pool->mutex. */
+ by buf_pool.mutex. */
/* @{ */
/** Page id. Protected by buf_pool mutex. */
@@ -1353,7 +1343,7 @@ public:
ib_uint32_t buf_fix_count;
/** type of pending I/O operation; also protected by
- buf_pool->mutex for writes only */
+ buf_pool.mutex for writes only */
buf_io_fix io_fix;
/** Block state. @see buf_page_in_file */
@@ -1366,10 +1356,10 @@ public:
/* @} */
page_zip_des_t zip; /*!< compressed page; zip.data
(but not the data it points to) is
- also protected by buf_pool->mutex;
+ also protected by buf_pool.mutex;
state == BUF_BLOCK_ZIP_PAGE and
zip.data == NULL means an active
- buf_pool->watch */
+ buf_pool.watch */
ulint write_size; /* Write size is set when this
page is first time written and then
@@ -1388,22 +1378,22 @@ public:
used for encryption/compression
or NULL */
buf_page_t* hash; /*!< node used in chaining to
- buf_pool->page_hash or
- buf_pool->zip_hash */
+ buf_pool.page_hash or
+ buf_pool.zip_hash */
#ifdef UNIV_DEBUG
- ibool in_page_hash; /*!< TRUE if in buf_pool->page_hash */
- ibool in_zip_hash; /*!< TRUE if in buf_pool->zip_hash */
+ ibool in_page_hash; /*!< TRUE if in buf_pool.page_hash */
+ ibool in_zip_hash; /*!< TRUE if in buf_pool.zip_hash */
#endif /* UNIV_DEBUG */
/** @name Page flushing fields
- All these are protected by buf_pool->mutex. */
+ All these are protected by buf_pool.mutex. */
/* @{ */
UT_LIST_NODE_T(buf_page_t) list;
/*!< based on state, this is a
list node, protected either by
- buf_pool->mutex or by
- buf_pool->flush_list_mutex,
+ buf_pool.mutex or by
+ buf_pool.flush_list_mutex,
in one of the following lists in
buf_pool:
@@ -1414,9 +1404,9 @@ public:
If bpage is part of flush_list
then the node pointers are
- covered by buf_pool->flush_list_mutex.
+ covered by buf_pool.flush_list_mutex.
Otherwise these pointers are
- protected by buf_pool->mutex.
+ protected by buf_pool.mutex.
The contents of the list node
is undefined if !in_flush_list
@@ -1427,19 +1417,19 @@ public:
BUF_BLOCK_READY_IN_USE. */
#ifdef UNIV_DEBUG
- ibool in_flush_list; /*!< TRUE if in buf_pool->flush_list;
- when buf_pool->flush_list_mutex is
+ ibool in_flush_list; /*!< TRUE if in buf_pool.flush_list;
+ when buf_pool.flush_list_mutex is
free, the following should hold:
in_flush_list
== (state == BUF_BLOCK_FILE_PAGE
|| state == BUF_BLOCK_ZIP_DIRTY)
Writes to this field must be
covered by both block->mutex
- and buf_pool->flush_list_mutex. Hence
+ and buf_pool.flush_list_mutex. Hence
reads can happen while holding
any one of the two mutexes */
- ibool in_free_list; /*!< TRUE if in buf_pool->free; when
- buf_pool->mutex is free, the following
+ ibool in_free_list; /*!< TRUE if in buf_pool.free; when
+ buf_pool.mutex is free, the following
should hold: in_free_list
== (state == BUF_BLOCK_NOT_USED) */
#endif /* UNIV_DEBUG */
@@ -1462,13 +1452,13 @@ public:
modifications are on disk.
Writes to this field must be
covered by both block->mutex
- and buf_pool->flush_list_mutex. Hence
+ and buf_pool.flush_list_mutex. Hence
reads can happen while holding
any one of the two mutexes */
/* @} */
/** @name LRU replacement algorithm fields
- These fields are protected by buf_pool->mutex only (not
- buf_pool->zip_mutex or buf_block_t::mutex). */
+ These fields are protected by buf_pool.mutex only (not
+ buf_pool.zip_mutex or buf_block_t::mutex). */
/* @{ */
UT_LIST_NODE_T(buf_page_t) LRU;
@@ -1479,9 +1469,9 @@ public:
debugging */
#endif /* UNIV_DEBUG */
unsigned old:1; /*!< TRUE if the block is in the old
- blocks in buf_pool->LRU_old */
+ blocks in buf_pool.LRU_old */
unsigned freed_page_clock:31;/*!< the value of
- buf_pool->freed_page_clock
+ buf_pool.freed_page_clock
when this block was the last
time put to the head of the
LRU list; a thread is allowed
@@ -1497,7 +1487,7 @@ public:
ibool file_page_was_freed;
/*!< this is set to TRUE when
fsp frees a page in buffer pool;
- protected by buf_pool->zip_mutex
+ protected by buf_pool.zip_mutex
or buf_block_t::mutex. */
# endif /* UNIV_DEBUG */
};
@@ -1511,7 +1501,7 @@ struct buf_block_t{
buf_page_t page; /*!< page information; this must
be the first field, so that
- buf_pool->page_hash can point
+ buf_pool.page_hash can point
to buf_page_t or buf_block_t */
byte* frame; /*!< pointer to buffer frame which
is of size srv_page_size, and
@@ -1533,7 +1523,7 @@ struct buf_block_t{
uint32_t lock_hash_val; /*!< hashed value of the page address
in the record lock hash table;
protected by buf_block_t::lock
- (or buf_block_t::mutex, buf_pool->mutex
+ (or buf_block_t::mutex, buf_pool.mutex
in buf_page_get_gen(),
buf_page_init_for_read()
and buf_page_create()) */
@@ -1674,7 +1664,7 @@ struct buf_block_t{
/**********************************************************************//**
-Compute the hash fold value for blocks in buf_pool->zip_hash. */
+Compute the hash fold value for blocks in buf_pool.zip_hash. */
/* @{ */
#define BUF_POOL_ZIP_FOLD_PTR(ptr) (ulint(ptr) >> srv_page_size_shift)
#define BUF_POOL_ZIP_FOLD(b) BUF_POOL_ZIP_FOLD_PTR((b)->frame)
@@ -1740,7 +1730,7 @@ protected:
buf_page_t* m_hp;
};
-/** Class implementing buf_pool->flush_list hazard pointer */
+/** Class implementing buf_pool.flush_list hazard pointer */
class FlushHp: public HazardPointer {
public:
@@ -1758,7 +1748,7 @@ public:
void adjust(const buf_page_t* bpage);
};
-/** Class implementing buf_pool->LRU hazard pointer */
+/** Class implementing buf_pool.LRU hazard pointer */
class LRUHp: public HazardPointer {
public:
@@ -1867,8 +1857,28 @@ typedef struct {
} buf_tmp_array_t;
/** The buffer pool */
-struct buf_pool_t
+class buf_pool_t
{
+ bool m_initialised;
+public:
+ /** Constructor */
+ buf_pool_t() :
+ m_initialised(false),
+ allocator(ut_allocator<byte>(mem_key_buf_buf_pool)),
+ flush_hp(&flush_list_mutex),
+ lru_hp(&mutex),
+ lru_scan_itr(&mutex),
+ single_scan_itr(&mutex)
+ {}
+ /** Create the buffer pool.
+ @return whether the creation failed */
+ bool create();
+ /** Free the buffer pool at shutdown.
+ This must not be invoked before freeing all mutexes. */
+ void close();
+ /** @return whether the buffer pool is initialised */
+ bool is_initialised() const { return m_initialised; }
+
/** @name General fields */
/* @{ */
BufPoolMutex mutex; /*!< Buffer pool mutex of this
@@ -1907,9 +1917,9 @@ struct buf_pool_t
page_hash is protected by an
array of mutexes.
Changes in page_hash are protected
- by buf_pool->mutex and the relevant
+ by buf_pool.mutex and the relevant
page_hash mutex. Lookups can happen
- while holding the buf_pool->mutex or
+ while holding the buf_pool.mutex or
the relevant page_hash mutex. */
hash_table_t* page_hash_old; /*!< old pointer to page_hash to be
freed after resizing buffer pool */
@@ -1995,7 +2005,7 @@ struct buf_pool_t
eviction. Set to TRUE whenever
we flush a batch from the
buffer pool. Protected by the
- buf_pool->mutex */
+ buf_pool.mutex */
/* @} */
/** @name LRU replacement algorithm fields */
@@ -2064,7 +2074,7 @@ struct buf_pool_t
buf_page_t* watch;
/*!< Sentinel records for buffer
pool watches. Protected by
- buf_pool->mutex. */
+ buf_pool.mutex. */
buf_tmp_array_t* tmp_arr;
/*!< Array for temporary memory
@@ -2076,6 +2086,9 @@ struct buf_pool_t
/* @} */
};
+/** The InnoDB buffer pool */
+extern buf_pool_t buf_pool;
+
/** @name Accessors for buffer pool mutexes
Use these instead of accessing buffer pool mutexes directly. */
/* @{ */
@@ -2099,15 +2112,15 @@ UNIV_INLINE
rw_lock_t*
buf_page_hash_lock_get(const page_id_t& page_id)
{
- return hash_get_lock(buf_pool->page_hash, page_id.fold());
+ return hash_get_lock(buf_pool.page_hash, page_id.fold());
}
/** If not appropriate page_hash_lock, relock until appropriate. */
# define buf_page_hash_lock_s_confirm(hash_lock, page_id)\
- hash_lock_s_confirm(hash_lock, buf_pool->page_hash, (page_id).fold())
+ hash_lock_s_confirm(hash_lock, buf_pool.page_hash, (page_id).fold())
# define buf_page_hash_lock_x_confirm(hash_lock, page_id)\
- hash_lock_x_confirm(hash_lock, buf_pool->page_hash, (page_id).fold())
+ hash_lock_x_confirm(hash_lock, buf_pool.page_hash, (page_id).fold())
#ifdef UNIV_DEBUG
/** Test if page_hash lock is held in s-mode. */
@@ -2143,13 +2156,13 @@ buf_page_hash_lock_get(const page_id_t& page_id)
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/** Forbid the release of the buffer pool mutex. */
# define buf_pool_mutex_exit_forbid() do { \
- ut_ad(mutex_own(&buf_pool->mutex)); \
- buf_pool->mutex_exit_forbidden++; \
+ ut_ad(mutex_own(&buf_pool.mutex)); \
+ buf_pool.mutex_exit_forbidden++; \
} while (0)
/** Allow the release of the buffer pool mutex. */
# define buf_pool_mutex_exit_allow() do { \
- ut_ad(mutex_own(&buf_pool->mutex)); \
- ut_ad(buf_pool->mutex_exit_forbidden--); \
+ ut_ad(mutex_own(&buf_pool.mutex)); \
+ ut_ad(buf_pool.mutex_exit_forbidden--); \
} while (0)
#else
/** Forbid the release of the buffer pool mutex. */
@@ -2212,7 +2225,7 @@ inline buf_page_t* LRUItr::start()
ut_ad(mutex_own(m_mutex));
if (!m_hp || m_hp->old) {
- m_hp = UT_LIST_GET_LAST(buf_pool->LRU);
+ m_hp = UT_LIST_GET_LAST(buf_pool.LRU);
}
return(m_hp);
@@ -2229,7 +2242,7 @@ struct CheckInLRUList {
static void validate()
{
CheckInLRUList check;
- ut_list_validate(buf_pool->LRU, check);
+ ut_list_validate(buf_pool.LRU, check);
}
};
@@ -2243,7 +2256,7 @@ struct CheckInFreeList {
static void validate()
{
CheckInFreeList check;
- ut_list_validate(buf_pool->free, check);
+ ut_list_validate(buf_pool.free, check);
}
};
@@ -2257,7 +2270,7 @@ struct CheckUnzipLRUAndLRUList {
static void validate()
{
CheckUnzipLRUAndLRUList check;
- ut_list_validate(buf_pool->unzip_LRU, check);
+ ut_list_validate(buf_pool.unzip_LRU, check);
}
};
#endif /* UNIV_DEBUG || defined UNIV_BUF_DEBUG */
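
The header now declares a single static buf_pool object whose lifetime is managed through the new member functions instead of the old buf_pool_init()/buf_pool_free() pair. A sketch of the intended call sequence, based on the srv0start.cc changes later in this patch (error handling abbreviated):

	/* During startup: */
	if (buf_pool.create()) {
		ib::error() << "Cannot allocate memory for the buffer pool";
		return(srv_init_abort(DB_ERROR));
	}

	/* During shutdown the call is unconditional; close() can
	presumably consult is_initialised() and do nothing if
	create() never ran. */
	buf_pool.close();
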
diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic
index 7a7cc05940b..3f5fa93cd8a 100644
--- a/storage/innobase/include/buf0buf.ic
+++ b/storage/innobase/include/buf0buf.ic
@@ -87,7 +87,7 @@ buf_page_get_freed_page_clock(
/*==========================*/
const buf_page_t* bpage) /*!< in: block */
{
- /* This is sometimes read without holding buf_pool->mutex. */
+ /* This is sometimes read without holding buf_pool.mutex. */
return(bpage->freed_page_clock);
}
@@ -112,10 +112,10 @@ The page must be either buffer-fixed, or its page hash must be locked.
UNIV_INLINE bool buf_page_peek_if_young(const buf_page_t* bpage)
{
/* FIXME: bpage->freed_page_clock is 31 bits */
- return((buf_pool->freed_page_clock & ((1UL << 31) - 1))
+ return((buf_pool.freed_page_clock & ((1UL << 31) - 1))
< (bpage->freed_page_clock
- + (buf_pool->curr_size
- * (BUF_LRU_OLD_RATIO_DIV - buf_pool->LRU_old_ratio)
+ + (buf_pool.curr_size
+ * (BUF_LRU_OLD_RATIO_DIV - buf_pool.LRU_old_ratio)
/ (BUF_LRU_OLD_RATIO_DIV * 4))));
}
@@ -130,7 +130,7 @@ buf_page_peek_if_too_old(
/*=====================*/
const buf_page_t* bpage) /*!< in: block to make younger */
{
- if (buf_pool->freed_page_clock == 0) {
+ if (buf_pool.freed_page_clock == 0) {
/* If eviction has not started yet, do not update the
statistics or move blocks in the LRU list. This is
either the warm-up phase or an in-memory workload. */
@@ -150,7 +150,7 @@ buf_page_peek_if_too_old(
return(TRUE);
}
- buf_pool->stat.n_pages_not_made_young++;
+ buf_pool.stat.n_pages_not_made_young++;
return(FALSE);
} else {
return(!buf_page_peek_if_young(bpage));
@@ -319,7 +319,7 @@ buf_page_get_mutex(
return(NULL);
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
- return(&buf_pool->zip_mutex);
+ return(&buf_pool.zip_mutex);
default:
return(&((buf_block_t*) bpage)->mutex);
}
@@ -422,7 +422,7 @@ buf_page_set_io_fix(
buf_page_t* bpage, /*!< in/out: control block */
enum buf_io_fix io_fix) /*!< in: io_fix state */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
bpage->io_fix = io_fix;
@@ -441,9 +441,8 @@ buf_block_set_io_fix(
buf_page_set_io_fix(&block->page, io_fix);
}
-/*********************************************************************//**
-Makes a block sticky. A sticky block implies that even after we release
-the buf_pool->mutex and the block->mutex:
+/** Make a block sticky. A sticky block implies that even after we release
+the buf_pool.mutex and the block->mutex:
* it cannot be removed from the flush_list
* the block descriptor cannot be relocated
* it cannot be removed from the LRU list
@@ -456,7 +455,7 @@ buf_page_set_sticky(
/*================*/
buf_page_t* bpage) /*!< in/out: control block */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
@@ -471,7 +470,7 @@ buf_page_unset_sticky(
/*==================*/
buf_page_t* bpage) /*!< in/out: control block */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_PIN);
@@ -487,7 +486,7 @@ buf_page_can_relocate(
/*==================*/
const buf_page_t* bpage) /*!< control block being relocated */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(buf_page_in_file(bpage));
ut_ad(bpage->in_LRU_list);
@@ -509,7 +508,7 @@ buf_page_is_old(
purposes even if LRU mutex is not being held. Keep the assertion
for now since all the callers hold it. */
ut_ad(mutex_own(buf_page_get_mutex(bpage))
- || mutex_own(&buf_pool->mutex));
+ || mutex_own(&buf_pool.mutex));
ut_ad(buf_page_in_file(bpage));
return(bpage->old);
@@ -525,13 +524,13 @@ buf_page_set_old(
bool old) /*!< in: old */
{
ut_a(buf_page_in_file(bpage));
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(bpage->in_LRU_list);
#ifdef UNIV_LRU_DEBUG
- ut_a((buf_pool->LRU_old_len == 0) == (buf_pool->LRU_old == NULL));
+ ut_a((buf_pool.LRU_old_len == 0) == (buf_pool.LRU_old == NULL));
/* If a block is flagged "old", the LRU_old list must exist. */
- ut_a(!old || buf_pool->LRU_old);
+ ut_a(!old || buf_pool.LRU_old);
if (UT_LIST_GET_PREV(LRU, bpage) && UT_LIST_GET_NEXT(LRU, bpage)) {
const buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage);
@@ -540,7 +539,7 @@ buf_page_set_old(
ut_a(prev->old == old);
} else {
ut_a(!prev->old);
- ut_a(buf_pool->LRU_old == (old ? bpage : next));
+ ut_a(buf_pool.LRU_old == (old ? bpage : next));
}
}
#endif /* UNIV_LRU_DEBUG */
@@ -570,7 +569,7 @@ buf_page_set_accessed(
/*==================*/
buf_page_t* bpage) /*!< in/out: control block */
{
- ut_ad(!mutex_own(&buf_pool->mutex));
+ ut_ad(!mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_a(buf_page_in_file(bpage));
@@ -592,7 +591,7 @@ buf_page_get_block(
{
if (bpage != NULL) {
ut_ad(buf_page_hash_lock_held_s_or_x(bpage)
- || mutex_own(&buf_pool->mutex));
+ || mutex_own(&buf_pool.mutex));
ut_ad(buf_page_in_file(bpage));
if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
@@ -730,7 +729,7 @@ buf_block_free(
/*===========*/
buf_block_t* block) /*!< in, own: block to be freed */
{
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
buf_page_mutex_enter(block);
@@ -740,7 +739,7 @@ buf_block_free(
buf_page_mutex_exit(block);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
/*********************************************************************//**
@@ -799,7 +798,7 @@ buf_block_modify_clock_inc(
{
/* No latch is acquired for the shared temporary tablespace. */
ut_ad(fsp_is_system_temporary(block->page.id.space())
- || (mutex_own(&buf_pool->mutex)
+ || (mutex_own(&buf_pool.mutex)
&& block->page.buf_fix_count == 0)
|| rw_lock_own_flagged(&block->lock,
RW_LOCK_FLAG_X | RW_LOCK_FLAG_SX));
@@ -935,14 +934,14 @@ buf_page_hash_get_low(const page_id_t& page_id)
#ifdef UNIV_DEBUG
rw_lock_t* hash_lock;
- hash_lock = hash_get_lock(buf_pool->page_hash, page_id.fold());
+ hash_lock = hash_get_lock(buf_pool.page_hash, page_id.fold());
ut_ad(rw_lock_own(hash_lock, RW_LOCK_X)
|| rw_lock_own(hash_lock, RW_LOCK_S));
#endif /* UNIV_DEBUG */
/* Look for the page in the hash table */
- HASH_SEARCH(hash, buf_pool->page_hash, page_id.fold(), buf_page_t*,
+ HASH_SEARCH(hash, buf_pool.page_hash, page_id.fold(), buf_page_t*,
bpage,
ut_ad(bpage->in_page_hash && !bpage->in_zip_hash
&& buf_page_in_file(bpage)),
@@ -990,7 +989,7 @@ buf_page_hash_get_locked(
mode = lock_mode;
}
- hash_lock = hash_get_lock(buf_pool->page_hash, page_id.fold());
+ hash_lock = hash_get_lock(buf_pool.page_hash, page_id.fold());
ut_ad(!rw_lock_own(hash_lock, RW_LOCK_X)
&& !rw_lock_own(hash_lock, RW_LOCK_S));
@@ -1000,12 +999,12 @@ buf_page_hash_get_locked(
/* If not own buf_pool_mutex, page_hash can be changed. */
hash_lock = hash_lock_s_confirm(
- hash_lock, buf_pool->page_hash, page_id.fold());
+ hash_lock, buf_pool.page_hash, page_id.fold());
} else {
rw_lock_x_lock(hash_lock);
/* If not own buf_pool_mutex, page_hash can be changed. */
hash_lock = hash_lock_x_confirm(
- hash_lock, buf_pool->page_hash, page_id.fold());
+ hash_lock, buf_pool.page_hash, page_id.fold());
}
bpage = buf_page_hash_get_low(page_id);
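
buf_page_hash_get_locked() above also illustrates the lock-then-confirm idiom on buf_pool.page_hash: the hash table can be replaced while the buffer pool is being resized, so the latch chosen from the fold must be re-resolved once it is actually held. A minimal sketch for the shared-lock case; rw_lock_s_lock() is the s-latch counterpart of the rw_lock_x_lock() shown in the excerpt:

	rw_lock_t*	hash_lock = hash_get_lock(buf_pool.page_hash,
						  page_id.fold());
	rw_lock_s_lock(hash_lock);
	/* page_hash may have been swapped while we waited for the latch;
	pick the lock for the same fold again and relock if needed. */
	hash_lock = hash_lock_s_confirm(hash_lock, buf_pool.page_hash,
					page_id.fold());
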
diff --git a/storage/innobase/include/buf0flu.ic b/storage/innobase/include/buf0flu.ic
index e12234565de..4fed4aee63d 100644
--- a/storage/innobase/include/buf0flu.ic
+++ b/storage/innobase/include/buf0flu.ic
@@ -68,8 +68,8 @@ buf_flush_note_modification(
|| fsp_is_system_temporary(block->page.id.space()));
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(block->page.buf_fix_count > 0);
- ut_ad(!mutex_own(&buf_pool->mutex));
- ut_ad(!mutex_own(&buf_pool->flush_list_mutex));
+ ut_ad(!mutex_own(&buf_pool.mutex));
+ ut_ad(!mutex_own(&buf_pool.flush_list_mutex));
mutex_enter(&block->mutex);
ut_ad(block->page.newest_modification <= end_lsn);
@@ -107,8 +107,8 @@ buf_flush_recv_note_modification(
ut_ad(!srv_read_only_mode);
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(block->page.buf_fix_count > 0);
- ut_ad(!mutex_own(&buf_pool->mutex));
- ut_ad(!mutex_own(&buf_pool->flush_list_mutex));
+ ut_ad(!mutex_own(&buf_pool.mutex));
+ ut_ad(!mutex_own(&buf_pool.flush_list_mutex));
ut_ad(start_lsn != 0);
ut_ad(block->page.newest_modification <= end_lsn);
diff --git a/storage/innobase/include/buf0lru.h b/storage/innobase/include/buf0lru.h
index 982cfeaf418..d39e0c7fd24 100644
--- a/storage/innobase/include/buf0lru.h
+++ b/storage/innobase/include/buf0lru.h
@@ -62,12 +62,9 @@ void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table);
void buf_LRU_flush_or_remove_pages(ulint id, FlushObserver* observer);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
-/********************************************************************//**
-Insert a compressed block into buf_pool->zip_clean in the LRU order. */
-void
-buf_LRU_insert_zip_clean(
-/*=====================*/
- buf_page_t* bpage); /*!< in: pointer to the block in question */
+/** Insert a compressed block into buf_pool.zip_clean in the LRU order.
+@param[in] bpage pointer to the block in question */
+void buf_LRU_insert_zip_clean(buf_page_t* bpage);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
/******************************************************************//**
@@ -75,10 +72,10 @@ Try to free a block. If bpage is a descriptor of a compressed-only
page, the descriptor object will be freed as well.
NOTE: If this function returns true, it will temporarily
-release buf_pool->mutex. Furthermore, the page frame will no longer be
+release buf_pool.mutex. Furthermore, the page frame will no longer be
accessible via bpage.
-The caller must hold buf_pool->mutex and must not hold any
+The caller must hold buf_pool.mutex and must not hold any
buf_page_get_mutex() when calling this function.
@return true if freed, false otherwise. */
bool
@@ -95,7 +92,7 @@ buf_LRU_free_page(
@return true if found and freed */
bool buf_LRU_scan_and_free_block(bool scan_all);
-/** @return a buffer block from the buf_pool->free list
+/** @return a buffer block from the buf_pool.free list
@retval NULL if the free list is empty */
buf_block_t* buf_LRU_get_free_only();
@@ -108,7 +105,7 @@ the free list. Even when we flush a page or find a page in LRU scan
we put it to free list to be used.
* iteration 0:
* get a block from free list, success:done
- * if buf_pool->try_LRU_scan is set
+ * if buf_pool.try_LRU_scan is set
* scan LRU up to srv_LRU_scan_depth to find a clean block
* the above will put the block on free list
* success:retry the free list
@@ -118,7 +115,7 @@ we put it to free list to be used.
* iteration 1:
* same as iteration 0 except:
* scan whole LRU list
- * scan LRU list even if buf_pool->try_LRU_scan is not set
+ * scan LRU list even if buf_pool.try_LRU_scan is not set
* iteration > 1:
* same as iteration 1 but sleep 10ms
@return the free control block, in state BUF_BLOCK_READY_FOR_USE */
@@ -158,11 +155,11 @@ Moves a block to the start of the LRU list. */
void
buf_LRU_make_block_young(buf_page_t* bpage);
-/** Update buf_pool->LRU_old_ratio.
+/** Update buf_pool.LRU_old_ratio.
@param[in] old_pct Reserve this percentage of
the buffer pool for "old" blocks
@param[in] adjust true=adjust the LRU list;
- false=just assign buf_pool->LRU_old_ratio
+ false=just assign buf_pool.LRU_old_ratio
during the initialization of InnoDB
@return updated old_pct */
uint
@@ -197,15 +194,15 @@ void buf_LRU_print();
#endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */
/** @name Heuristics for detecting index scan @{ */
-/** The denominator of buf_pool->LRU_old_ratio. */
+/** The denominator of buf_pool.LRU_old_ratio. */
#define BUF_LRU_OLD_RATIO_DIV 1024
-/** Maximum value of buf_pool->LRU_old_ratio.
+/** Maximum value of buf_pool.LRU_old_ratio.
@see buf_LRU_old_adjust_len
-@see buf_pool->LRU_old_ratio_update */
+@see buf_pool.LRU_old_ratio_update */
#define BUF_LRU_OLD_RATIO_MAX BUF_LRU_OLD_RATIO_DIV
-/** Minimum value of buf_pool->LRU_old_ratio.
+/** Minimum value of buf_pool.LRU_old_ratio.
@see buf_LRU_old_adjust_len
-@see buf_pool->LRU_old_ratio_update
+@see buf_pool.LRU_old_ratio_update
The minimum must exceed
(BUF_LRU_OLD_TOLERANCE + 5) * BUF_LRU_OLD_RATIO_DIV / BUF_LRU_OLD_MIN_LEN. */
#define BUF_LRU_OLD_RATIO_MIN 51
@@ -226,7 +223,7 @@ extern uint buf_LRU_old_threshold_ms;
These statistics are not 'of' LRU but 'for' LRU. We keep count of I/O
and page_zip_decompress() operations. Based on the statistics we decide
-if we want to evict from buf_pool->unzip_LRU or buf_pool->LRU. */
+if we want to evict from buf_pool.unzip_LRU or buf_pool.LRU. */
struct buf_LRU_stat_t
{
ulint io; /**< Counter of buffer pool I/O operations. */
@@ -238,7 +235,7 @@ Cleared by buf_LRU_stat_update(). */
extern buf_LRU_stat_t buf_LRU_stat_cur;
/** Running sum of past values of buf_LRU_stat_cur.
-Updated by buf_LRU_stat_update(). Protected by buf_pool->mutex. */
+Updated by buf_LRU_stat_update(). Protected by buf_pool.mutex. */
extern buf_LRU_stat_t buf_LRU_stat_sum;
/********************************************************************//**
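
The comment block for buf_LRU_get_free_block() earlier in this header describes its retry strategy rather than giving code. A rough, hypothetical sketch of that control flow follows; the real implementation lives in buf0lru.cc and differs in detail, but buf_LRU_get_free_only(), buf_LRU_scan_and_free_block(), buf_pool.try_LRU_scan and os_thread_sleep() are all part of this patch:

	for (ulint iteration = 0; ; iteration++) {
		if (buf_block_t* block = buf_LRU_get_free_only()) {
			return(block);	/* got a block from buf_pool.free */
		}

		/* On iteration 0, scan only if try_LRU_scan is set and only
		up to srv_LRU_scan_depth; later iterations scan it all. */
		const bool	scan_all = iteration > 0;
		if ((scan_all || buf_pool.try_LRU_scan)
		    && buf_LRU_scan_and_free_block(scan_all)) {
			continue;	/* a clean block went to the free list */
		}

		/* Otherwise request a flush batch and, from the second
		retry on, back off for 10ms before trying again. */
		if (iteration > 1) {
			os_thread_sleep(10000);
		}
	}
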
diff --git a/storage/innobase/include/buf0types.h b/storage/innobase/include/buf0types.h
index 2847e328515..34bdc0ca407 100644
--- a/storage/innobase/include/buf0types.h
+++ b/storage/innobase/include/buf0types.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2015, Oracle and/or its affiliates. All rights reserved.
+Copyright (c) 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -35,8 +36,6 @@ class buf_page_t;
struct buf_block_t;
/** Buffer pool chunk comprising buf_block_t */
struct buf_chunk_t;
-/** Buffer pool comprising buf_chunk_t */
-struct buf_pool_t;
/** Buffer pool statistics struct */
struct buf_pool_stat_t;
/** Buffer pool buddy statistics struct */
diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h
index 3510864139c..d0217bfe749 100644
--- a/storage/innobase/include/srv0srv.h
+++ b/storage/innobase/include/srv0srv.h
@@ -368,7 +368,7 @@ extern const ulint srv_buf_pool_def_size;
/** Requested buffer pool chunk size. Each buffer pool instance consists
of one or more chunks. */
extern ulong srv_buf_pool_chunk_unit;
-/** Number of locks to protect buf_pool->page_hash */
+/** Number of locks to protect buf_pool.page_hash */
extern ulong srv_n_page_hash_locks;
/** Scan depth for LRU flush batch i.e.: number of blocks scanned*/
extern ulong srv_LRU_scan_depth;
@@ -946,7 +946,7 @@ struct export_var_t{
#ifdef UNIV_DEBUG
ulint innodb_buffer_pool_pages_latched; /*!< Latched pages */
#endif /* UNIV_DEBUG */
- ulint innodb_buffer_pool_read_requests; /*!< buf_pool->stat.n_page_gets */
+ ulint innodb_buffer_pool_read_requests; /*!< buf_pool.stat.n_page_gets */
ulint innodb_buffer_pool_reads; /*!< srv_buf_pool_reads */
ulint innodb_buffer_pool_wait_free; /*!< srv_buf_pool_wait_free */
ulint innodb_buffer_pool_pages_flushed; /*!< srv_buf_pool_flushed */
@@ -965,10 +965,10 @@ struct export_var_t{
ulint innodb_os_log_pending_writes; /*!< srv_os_log_pending_writes */
ulint innodb_os_log_pending_fsyncs; /*!< fil_n_pending_log_flushes */
ulint innodb_page_size; /*!< srv_page_size */
- ulint innodb_pages_created; /*!< buf_pool->stat.n_pages_created */
- ulint innodb_pages_read; /*!< buf_pool->stat.n_pages_read*/
+ ulint innodb_pages_created; /*!< buf_pool.stat.n_pages_created */
+ ulint innodb_pages_read; /*!< buf_pool.stat.n_pages_read*/
ulint innodb_page0_read; /*!< srv_stats.page0_read */
- ulint innodb_pages_written; /*!< buf_pool->stat.n_pages_written */
+ ulint innodb_pages_written; /*!< buf_pool.stat.n_pages_written */
ulint innodb_row_lock_waits; /*!< srv_n_lock_wait_count */
ulint innodb_row_lock_current_waits; /*!< srv_n_lock_wait_current_count */
int64_t innodb_row_lock_time; /*!< srv_n_lock_wait_time
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index c1c4a7906be..b38f1990356 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -537,8 +537,8 @@ void lock_sys_t::resize(ulint n_cells)
hash_table_free(old_hash);
/* need to update block->lock_hash_val */
- mutex_enter(&buf_pool->mutex);
- for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
+ mutex_enter(&buf_pool.mutex);
+ for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool.LRU);
bpage; bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
buf_block_t* block = reinterpret_cast<buf_block_t*>(
@@ -548,7 +548,7 @@ void lock_sys_t::resize(ulint n_cells)
bpage->id.space(), bpage->id.page_no());
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
mutex_exit(&mutex);
}
diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc
index b475af8558d..54b5622cc3b 100644
--- a/storage/innobase/log/log0log.cc
+++ b/storage/innobase/log/log0log.cc
@@ -1849,7 +1849,7 @@ wait_suspend_loop:
ut_ad(!log_scrub_thread_active);
- if (!buf_pool) {
+ if (!buf_pool.is_initialised()) {
ut_ad(!srv_was_started);
} else if (ulint pending_io = buf_pool_check_no_pending_io()) {
if (srv_print_verbose_log && count > 600) {
diff --git a/storage/innobase/srv/srv0mon.cc b/storage/innobase/srv/srv0mon.cc
index d8fa1475930..836c5bde031 100644
--- a/storage/innobase/srv/srv0mon.cc
+++ b/storage/innobase/srv/srv0mon.cc
@@ -1643,7 +1643,7 @@ srv_mon_process_existing_counter(
/* innodb_buffer_pool_read_requests, the number of logical
read requests */
case MONITOR_OVLD_BUF_POOL_READ_REQUESTS:
- value = buf_pool->stat.n_page_gets;
+ value = buf_pool.stat.n_page_gets;
break;
/* innodb_buffer_pool_write_requests, the number of
@@ -1659,12 +1659,12 @@ srv_mon_process_existing_counter(
/* innodb_buffer_pool_read_ahead */
case MONITOR_OVLD_BUF_POOL_READ_AHEAD:
- value = buf_pool->stat.n_ra_pages_read;
+ value = buf_pool.stat.n_ra_pages_read;
break;
/* innodb_buffer_pool_read_ahead_evicted */
case MONITOR_OVLD_BUF_POOL_READ_AHEAD_EVICTED:
- value = buf_pool->stat.n_ra_pages_evicted;
+ value = buf_pool.stat.n_ra_pages_evicted;
break;
/* innodb_buffer_pool_pages_total */
@@ -1675,45 +1675,45 @@ srv_mon_process_existing_counter(
/* innodb_buffer_pool_pages_misc */
case MONITOR_OVLD_BUF_POOL_PAGE_MISC:
value = buf_pool_get_n_pages()
- - UT_LIST_GET_LEN(buf_pool->LRU)
- - UT_LIST_GET_LEN(buf_pool->free);
+ - UT_LIST_GET_LEN(buf_pool.LRU)
+ - UT_LIST_GET_LEN(buf_pool.free);
break;
/* innodb_buffer_pool_pages_data */
case MONITOR_OVLD_BUF_POOL_PAGES_DATA:
- value = UT_LIST_GET_LEN(buf_pool->LRU);
+ value = UT_LIST_GET_LEN(buf_pool.LRU);
break;
/* innodb_buffer_pool_bytes_data */
case MONITOR_OVLD_BUF_POOL_BYTES_DATA:
- value = buf_pool->stat.LRU_bytes
- + (UT_LIST_GET_LEN(buf_pool->unzip_LRU)
+ value = buf_pool.stat.LRU_bytes
+ + (UT_LIST_GET_LEN(buf_pool.unzip_LRU)
<< srv_page_size_shift);
break;
/* innodb_buffer_pool_pages_dirty */
case MONITOR_OVLD_BUF_POOL_PAGES_DIRTY:
- value = UT_LIST_GET_LEN(buf_pool->flush_list);
+ value = UT_LIST_GET_LEN(buf_pool.flush_list);
break;
/* innodb_buffer_pool_bytes_dirty */
case MONITOR_OVLD_BUF_POOL_BYTES_DIRTY:
- value = buf_pool->stat.flush_list_bytes;
+ value = buf_pool.stat.flush_list_bytes;
break;
/* innodb_buffer_pool_pages_free */
case MONITOR_OVLD_BUF_POOL_PAGES_FREE:
- value = UT_LIST_GET_LEN(buf_pool->free);
+ value = UT_LIST_GET_LEN(buf_pool.free);
break;
/* innodb_pages_created, the number of pages created */
case MONITOR_OVLD_PAGE_CREATED:
- value = buf_pool->stat.n_pages_created;
+ value = buf_pool.stat.n_pages_created;
break;
/* innodb_pages_written, the number of pages written */
case MONITOR_OVLD_PAGES_WRITTEN:
- value = buf_pool->stat.n_pages_written;
+ value = buf_pool.stat.n_pages_written;
break;
/* innodb_index_pages_written, the number of index pages written */
@@ -1728,7 +1728,7 @@ srv_mon_process_existing_counter(
/* innodb_pages_read */
case MONITOR_OVLD_PAGES_READ:
- value = buf_pool->stat.n_pages_read;
+ value = buf_pool.stat.n_pages_read;
break;
/* innodb_pages0_read */
diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc
index c3e6aadab3e..8d67da47172 100644
--- a/storage/innobase/srv/srv0srv.cc
+++ b/storage/innobase/srv/srv0srv.cc
@@ -233,7 +233,7 @@ const ulint srv_buf_pool_def_size = 128 * 1024 * 1024;
of one or more chunks. */
ulong srv_buf_pool_chunk_unit;
/** innodb_page_hash_locks (a debug-only parameter);
-number of locks to protect buf_pool->page_hash */
+number of locks to protect buf_pool.page_hash */
ulong srv_n_page_hash_locks = 16;
/** innodb_lru_scan_depth; number of blocks scanned in LRU flush batch */
ulong srv_LRU_scan_depth;
@@ -1278,7 +1278,7 @@ srv_printf_innodb_monitor(
const hash_table_t* table = btr_search_sys->hash_tables[i];
ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
- /* this is only used for buf_pool->page_hash */
+ /* this is only used for buf_pool.page_hash */
ut_ad(!table->heaps);
/* this is used for the adaptive hash index */
ut_ad(table->heap);
@@ -1450,7 +1450,7 @@ srv_export_innodb_status(void)
export_vars.innodb_data_written = srv_stats.data_written;
export_vars.innodb_buffer_pool_read_requests
- = buf_pool->stat.n_page_gets;
+ = buf_pool.stat.n_page_gets;
export_vars.innodb_buffer_pool_write_requests =
srv_stats.buf_pool_write_requests;
@@ -1464,30 +1464,30 @@ srv_export_innodb_status(void)
export_vars.innodb_buffer_pool_reads = srv_stats.buf_pool_reads;
export_vars.innodb_buffer_pool_read_ahead_rnd =
- buf_pool->stat.n_ra_pages_read_rnd;
+ buf_pool.stat.n_ra_pages_read_rnd;
export_vars.innodb_buffer_pool_read_ahead =
- buf_pool->stat.n_ra_pages_read;
+ buf_pool.stat.n_ra_pages_read;
export_vars.innodb_buffer_pool_read_ahead_evicted =
- buf_pool->stat.n_ra_pages_evicted;
+ buf_pool.stat.n_ra_pages_evicted;
export_vars.innodb_buffer_pool_pages_data =
- UT_LIST_GET_LEN(buf_pool->LRU);
+ UT_LIST_GET_LEN(buf_pool.LRU);
export_vars.innodb_buffer_pool_bytes_data =
- buf_pool->stat.LRU_bytes
- + (UT_LIST_GET_LEN(buf_pool->unzip_LRU)
+ buf_pool.stat.LRU_bytes
+ + (UT_LIST_GET_LEN(buf_pool.unzip_LRU)
<< srv_page_size_shift);
export_vars.innodb_buffer_pool_pages_dirty =
- UT_LIST_GET_LEN(buf_pool->flush_list);
+ UT_LIST_GET_LEN(buf_pool.flush_list);
export_vars.innodb_buffer_pool_bytes_dirty =
- buf_pool->stat.flush_list_bytes;
+ buf_pool.stat.flush_list_bytes;
export_vars.innodb_buffer_pool_pages_free =
- UT_LIST_GET_LEN(buf_pool->free);
+ UT_LIST_GET_LEN(buf_pool.free);
#ifdef UNIV_DEBUG
export_vars.innodb_buffer_pool_pages_latched =
@@ -1497,8 +1497,8 @@ srv_export_innodb_status(void)
export_vars.innodb_buffer_pool_pages_misc =
buf_pool_get_n_pages()
- - UT_LIST_GET_LEN(buf_pool->LRU)
- - UT_LIST_GET_LEN(buf_pool->free);
+ - UT_LIST_GET_LEN(buf_pool.LRU)
+ - UT_LIST_GET_LEN(buf_pool.free);
#ifdef HAVE_ATOMIC_BUILTINS
export_vars.innodb_have_atomic_builtins = 1;
@@ -1528,12 +1528,12 @@ srv_export_innodb_status(void)
export_vars.innodb_dblwr_writes = srv_stats.dblwr_writes;
- export_vars.innodb_pages_created = buf_pool->stat.n_pages_created;
+ export_vars.innodb_pages_created = buf_pool.stat.n_pages_created;
- export_vars.innodb_pages_read = buf_pool->stat.n_pages_read;
+ export_vars.innodb_pages_read = buf_pool.stat.n_pages_read;
export_vars.innodb_page0_read = srv_stats.page0_read;
- export_vars.innodb_pages_written = buf_pool->stat.n_pages_written;
+ export_vars.innodb_pages_written = buf_pool.stat.n_pages_written;
export_vars.innodb_row_lock_waits = srv_stats.n_lock_wait_count;
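
One derived value in the export above is worth spelling out: innodb_buffer_pool_pages_misc is not counted anywhere, it is computed as the total number of pool pages minus the lengths of the LRU and free lists. For example, with a hypothetical 8192-page pool, 8000 pages on buf_pool.LRU and 150 on buf_pool.free, the server would report 8192 - 8000 - 150 = 42 miscellaneous pages, that is, pages taken for administrative use such as the adaptive hash index.
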
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index 43d49e817df..991d91319f4 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -1608,7 +1608,7 @@ dberr_t srv_start(bool create_new_db)
<< srv_buf_pool_size
<< ", chunk size = " << srv_buf_pool_chunk_unit;
- if (buf_pool_init()) {
+ if (buf_pool.create()) {
ib::error() << "Cannot allocate memory for the buffer pool";
return(srv_init_abort(DB_ERROR));
@@ -2665,11 +2665,7 @@ void innodb_shutdown()
pars_lexer_close();
recv_sys_close();
-
- ut_ad(buf_pool || !srv_was_started);
- if (buf_pool) {
- buf_pool_free();
- }
+ buf_pool.close();
sync_check_close();
diff --git a/storage/innobase/sync/sync0debug.cc b/storage/innobase/sync/sync0debug.cc
index 2053b36e61d..718a5b9489e 100644
--- a/storage/innobase/sync/sync0debug.cc
+++ b/storage/innobase/sync/sync0debug.cc
@@ -837,9 +837,9 @@ LatchDebug::check_order(
case SYNC_BUF_BLOCK:
- /* Either the thread must own the (buffer pool) buf_pool->mutex
+ /* Either the thread must own the (buffer pool) buf_pool.mutex
or it is allowed to latch only ONE of (buffer block)
- block->mutex or buf_pool->zip_mutex. */
+ block->mutex or buf_pool.zip_mutex. */
if (less(latches, level) != NULL) {
basic_check(latches, level, level - 1);