summary refs log tree commit diff
path: root/storage/innobase/buf/buf0rea.cc
diff options
context:
space:
mode:
Diffstat (limited to 'storage/innobase/buf/buf0rea.cc')
-rw-r--r-- storage/innobase/buf/buf0rea.cc 59
1 files changed, 29 insertions, 30 deletions
diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc
index 72f1204b276..bf67a8513d7 100644
--- a/storage/innobase/buf/buf0rea.cc
+++ b/storage/innobase/buf/buf0rea.cc
@@ -41,7 +41,7 @@ Created 11/5/1995 Heikki Tuuri
#include "srv0start.h"
#include "srv0srv.h"
-/** If there are buf_pool->curr_size per the number below pending reads, then
+/** If there are buf_pool.curr_size per the number below pending reads, then
read-ahead is not done: this is to prevent flooding the buffer pool with
i/o-fixed buffer blocks */
#define BUF_READ_AHEAD_PEND_LIMIT 2
@@ -59,7 +59,7 @@ buf_read_page_handle_error(
== BUF_BLOCK_FILE_PAGE);
/* First unfix and release lock on the bpage */
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
rw_lock_t* hash_lock = buf_page_hash_lock_get(bpage->id);
rw_lock_x_lock(hash_lock);
mutex_enter(buf_page_get_mutex(bpage));
@@ -80,10 +80,11 @@ buf_read_page_handle_error(
/* remove the block from LRU list */
buf_LRU_free_one_page(bpage);
- ut_ad(buf_pool->n_pend_reads > 0);
- buf_pool->n_pend_reads--;
+ /* FIXME: use atomics */
+ ut_ad(buf_pool.n_pend_reads > 0);
+ buf_pool.n_pend_reads--;
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
/** Low-level function which reads a page asynchronously from a file to the
@@ -277,7 +278,7 @@ buf_read_ahead_random(
}
const ulint buf_read_ahead_random_area
- = buf_pool->read_ahead_area;
+ = buf_pool.read_ahead_area;
low = (page_id.page_no() / buf_read_ahead_random_area)
* buf_read_ahead_random_area;
@@ -314,12 +315,11 @@ buf_read_ahead_random(
return(0);
}
- mutex_enter(&buf_pool->mutex);
-
- if (buf_pool->n_pend_reads
- > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
- mutex_exit(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
+ if (buf_pool.n_pend_reads
+ > buf_pool.curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
+ mutex_exit(&buf_pool.mutex);
return(0);
}
@@ -335,7 +335,7 @@ buf_read_ahead_random(
space->release();
if (skip) {
high = space->size;
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
goto read_ahead;
}
});
@@ -345,14 +345,14 @@ buf_read_ahead_random(
if (buf_page_is_accessed(bpage)
&& buf_page_peek_if_young(bpage)
&& ++recent_blocks
- >= 5 + buf_pool->read_ahead_area / 8) {
- mutex_exit(&buf_pool->mutex);
+ >= 5 + buf_pool.read_ahead_area / 8) {
+ mutex_exit(&buf_pool.mutex);
goto read_ahead;
}
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* Do nothing */
return(0);
@@ -414,7 +414,7 @@ read_ahead:
LRU policy decision. */
buf_LRU_stat_inc_io();
- buf_pool->stat.n_ra_pages_read_rnd += count;
+ buf_pool.stat.n_ra_pages_read_rnd += count;
srv_stats.buf_pool_reads.add(count);
return(count);
}
@@ -570,7 +570,7 @@ buf_read_ahead_linear(
}
const ulint buf_read_ahead_linear_area
- = buf_pool->read_ahead_area;
+ = buf_pool.read_ahead_area;
low = (page_id.page_no() / buf_read_ahead_linear_area)
* buf_read_ahead_linear_area;
high = (page_id.page_no() / buf_read_ahead_linear_area + 1)
@@ -608,11 +608,10 @@ buf_read_ahead_linear(
return(0);
}
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- if (buf_pool->n_pend_reads
- > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
- mutex_exit(&buf_pool->mutex);
+ if (buf_pool.n_pend_reads
+ > buf_pool.curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
+ mutex_exit(&buf_pool.mutex);
return(0);
}
@@ -630,7 +629,7 @@ buf_read_ahead_linear(
/* How many out of order accessed pages can we ignore
when working out the access pattern for linear readahead */
threshold = ut_min(static_cast<ulint>(64 - srv_read_ahead_threshold),
- buf_pool->read_ahead_area);
+ buf_pool.read_ahead_area);
fail_count = 0;
@@ -661,7 +660,7 @@ buf_read_ahead_linear(
if (fail_count > threshold) {
/* Too many failures: return */
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(0);
}
@@ -676,7 +675,7 @@ buf_read_ahead_linear(
bpage = buf_page_hash_get(page_id);
if (bpage == NULL) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(0);
}
@@ -702,7 +701,7 @@ buf_read_ahead_linear(
pred_offset = fil_page_get_prev(frame);
succ_offset = fil_page_get_next(frame);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if ((page_id.page_no() == low)
&& (succ_offset == page_id.page_no() + 1)) {
@@ -800,7 +799,7 @@ buf_read_ahead_linear(
LRU policy decision. */
buf_LRU_stat_inc_io();
- buf_pool->stat.n_ra_pages_read += count;
+ buf_pool.stat.n_ra_pages_read += count;
return(count);
}
@@ -847,8 +846,8 @@ tablespace_deleted:
const page_id_t page_id(space_ids[i], page_nos[i]);
- while (buf_pool->n_pend_reads
- > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
+ while (buf_pool.n_pend_reads
+ > buf_pool.curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
os_thread_sleep(500000);
}
@@ -916,7 +915,7 @@ buf_read_recv_pages(
ulint count = 0;
- while (buf_pool->n_pend_reads >= recv_n_pool_free_frames / 2) {
+ while (buf_pool.n_pend_reads >= recv_n_pool_free_frames / 2) {
os_aio_simulated_wake_handler_threads();
os_thread_sleep(10000);
@@ -928,7 +927,7 @@ buf_read_recv_pages(
ib::error()
<< "Waited for " << count / 100
<< " seconds for "
- << buf_pool->n_pend_reads
+ << buf_pool.n_pend_reads
<< " pending reads";
}
}