| author | Inaam Rana <inaam.rana@oracle.com> | 2011-07-19 10:54:59 -0400 |
|---|---|---|
| committer | Inaam Rana <inaam.rana@oracle.com> | 2011-07-19 10:54:59 -0400 |
| commit | 588a546bf1634612e3b03773e36529330436fc4c (patch) | |
| tree | 83032f0a82682e5140d4e84af9609ad79261c723 /storage/innobase/buf | |
| parent | a31dc61865b537da7e8a3602080920f87756fb02 (diff) | |
| parent | 41258f8e6c46d06e817d55921c66309ae9701799 (diff) | |
Merge from 5.1 the fix for Bug 12356373
Diffstat (limited to 'storage/innobase/buf')
| -rw-r--r-- | storage/innobase/buf/buf0buf.c | 18 |
| -rw-r--r-- | storage/innobase/buf/buf0rea.c | 173 |
2 files changed, 186 insertions, 5 deletions
```diff
diff --git a/storage/innobase/buf/buf0buf.c b/storage/innobase/buf/buf0buf.c
index fe311ebfd8d..648e53ac4a6 100644
--- a/storage/innobase/buf/buf0buf.c
+++ b/storage/innobase/buf/buf0buf.c
@@ -400,6 +400,7 @@ buf_get_total_stat(
 	tot_stat->n_pages_read += buf_stat->n_pages_read;
 	tot_stat->n_pages_written += buf_stat->n_pages_written;
 	tot_stat->n_pages_created += buf_stat->n_pages_created;
+	tot_stat->n_ra_pages_read_rnd += buf_stat->n_ra_pages_read_rnd;
 	tot_stat->n_ra_pages_read += buf_stat->n_ra_pages_read;
 	tot_stat->n_ra_pages_evicted += buf_stat->n_ra_pages_evicted;
 	tot_stat->n_pages_made_young += buf_stat->n_pages_made_young;
@@ -2358,6 +2359,9 @@ loop2:
 	}
 
 	if (buf_read_page(space, zip_size, offset)) {
+		buf_read_ahead_random(space, zip_size, offset,
+				      ibuf_inside(mtr));
+
 		retries = 0;
 	} else if (retries < BUF_PAGE_READ_MAX_RETRIES) {
 		++retries;
@@ -4381,6 +4385,7 @@ buf_stats_aggregate_pool_info(
 	total_info->n_pages_created += pool_info->n_pages_created;
 	total_info->n_pages_written += pool_info->n_pages_written;
 	total_info->n_page_gets += pool_info->n_page_gets;
+	total_info->n_ra_pages_read_rnd += pool_info->n_ra_pages_read_rnd;
 	total_info->n_ra_pages_read += pool_info->n_ra_pages_read;
 	total_info->n_ra_pages_evicted += pool_info->n_ra_pages_evicted;
 	total_info->page_made_young_rate += pool_info->page_made_young_rate;
@@ -4393,6 +4398,7 @@ buf_stats_aggregate_pool_info(
 	total_info->page_read_delta += pool_info->page_read_delta;
 	total_info->young_making_delta += pool_info->young_making_delta;
 	total_info->not_young_making_delta += pool_info->not_young_making_delta;
+	total_info->pages_readahead_rnd_rate += pool_info->pages_readahead_rnd_rate;
 	total_info->pages_readahead_rate += pool_info->pages_readahead_rate;
 	total_info->pages_evicted_rate += pool_info->pages_evicted_rate;
 	total_info->unzip_lru_len += pool_info->unzip_lru_len;
@@ -4470,6 +4476,7 @@ buf_stats_get_pool_info(
 
 	pool_info->n_page_gets = buf_pool->stat.n_page_gets;
 
+	pool_info->n_ra_pages_read_rnd = buf_pool->stat.n_ra_pages_read_rnd;
 	pool_info->n_ra_pages_read = buf_pool->stat.n_ra_pages_read;
 
 	pool_info->n_ra_pages_evicted = buf_pool->stat.n_ra_pages_evicted;
@@ -4509,6 +4516,10 @@ buf_stats_get_pool_info(
 			buf_pool->stat.n_pages_not_made_young
 			- buf_pool->old_stat.n_pages_not_made_young;
 	}
 
+	pool_info->pages_readahead_rnd_rate =
+		(buf_pool->stat.n_ra_pages_read_rnd
+		- buf_pool->old_stat.n_ra_pages_read_rnd) / time_elapsed;
+
 	pool_info->pages_readahead_rate =
 		(buf_pool->stat.n_ra_pages_read
@@ -4594,9 +4605,12 @@ buf_print_io_instance(
 
 	/* Statistics about read ahead algorithm */
 	fprintf(file, "Pages read ahead %.2f/s,"
-		" evicted without access %.2f/s\n",
+		" evicted without access %.2f/s,"
+		" Random read ahead %.2f/s\n",
+
 		pool_info->pages_readahead_rate,
-		pool_info->pages_evicted_rate);
+		pool_info->pages_evicted_rate,
+		pool_info->pages_readahead_rnd_rate);
 
 	/* Print some values to help us with visualizing what is
 	happening with LRU eviction. */
```
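The buf0buf.c hunks mostly thread the new n_ra_pages_read_rnd counter through the existing statistics plumbing and turn it into a per-second rate for the status output. As a rough illustration of that rate calculation and of the status line format added to buf_print_io_instance(), here is a minimal standalone C sketch; the names readahead_rnd_rate, cur_rnd_reads, old_rnd_reads and time_elapsed, and the sample numbers, are illustrative stand-ins, not part of the patch.

```c
#include <stdio.h>

/* Illustrative snapshot pair: "cur_rnd_reads" stands in for
   buf_pool->stat.n_ra_pages_read_rnd and "old_rnd_reads" for the
   previously saved buf_pool->old_stat value. */
static double
readahead_rnd_rate(unsigned long cur_rnd_reads,
                   unsigned long old_rnd_reads,
                   double time_elapsed)
{
	/* Same shape as the patch: (current counter - old snapshot) / seconds */
	return (cur_rnd_reads - old_rnd_reads) / time_elapsed;
}

int main(void)
{
	/* e.g. 512 random read-ahead pages issued over a 30-second window */
	double rate = readahead_rnd_rate(10512, 10000, 30.0);

	/* Mirrors the format string added to buf_print_io_instance() */
	printf("Pages read ahead 0.00/s, evicted without access 0.00/s,"
	       " Random read ahead %.2f/s\n", rate);
	return 0;
}
```

In the patch itself the previous counter values live in buf_pool->old_stat and time_elapsed is the interval since the statistics were last sampled.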
```diff
diff --git a/storage/innobase/buf/buf0rea.c b/storage/innobase/buf/buf0rea.c
index eeaa21ae9ef..da804a66b29 100644
--- a/storage/innobase/buf/buf0rea.c
+++ b/storage/innobase/buf/buf0rea.c
@@ -40,8 +40,10 @@ Created 11/5/1995 Heikki Tuuri
 #include "mysql/plugin.h"
 #include "mysql/service_thd_wait.h"
 
-/** The linear read-ahead area size */
-#define BUF_READ_AHEAD_LINEAR_AREA	BUF_READ_AHEAD_AREA
+/** There must be at least this many pages in buf_pool in the area to start
+a random read-ahead */
+#define BUF_READ_AHEAD_RANDOM_THRESHOLD(b)	\
+	(5 + BUF_READ_AHEAD_AREA(b) / 8)
 
 /** If there are buf_pool->curr_size per the number below pending reads, then
 read-ahead is not done: this is to prevent flooding the buffer pool with
@@ -162,6 +164,171 @@ buf_read_page_low(
 }
 
 /********************************************************************//**
+Applies a random read-ahead in buf_pool if there are at least a threshold
+value of accessed pages from the random read-ahead area. Does not read any
+page, not even the one at the position (space, offset), if the read-ahead
+mechanism is not activated. NOTE 1: the calling thread may own latches on
+pages: to avoid deadlocks this function must be written such that it cannot
+end up waiting for these latches! NOTE 2: the calling thread must want
+access to the page given: this rule is set to prevent unintended read-aheads
+performed by ibuf routines, a situation which could result in a deadlock if
+the OS does not support asynchronous i/o.
+@return number of page read requests issued; NOTE that if we read ibuf
+pages, it may happen that the page at the given page number does not
+get read even if we return a positive value!
+@return number of page read requests issued */
+UNIV_INTERN
+ulint
+buf_read_ahead_random(
+/*==================*/
+	ulint	space,		/*!< in: space id */
+	ulint	zip_size,	/*!< in: compressed page size in bytes,
+				or 0 */
+	ulint	offset,		/*!< in: page number of a page which
+				the current thread wants to access */
+	ibool	inside_ibuf)	/*!< in: TRUE if we are inside ibuf
+				routine */
+{
+	buf_pool_t*	buf_pool = buf_pool_get(space, offset);
+	ib_int64_t	tablespace_version;
+	ulint		recent_blocks = 0;
+	ulint		ibuf_mode;
+	ulint		count;
+	ulint		low, high;
+	ulint		err;
+	ulint		i;
+	const ulint	buf_read_ahead_random_area
+		= BUF_READ_AHEAD_AREA(buf_pool);
+
+	if (!srv_random_read_ahead) {
+		/* Disabled by user */
+		return(0);
+	}
+
+	if (srv_startup_is_before_trx_rollback_phase) {
+		/* No read-ahead to avoid thread deadlocks */
+		return(0);
+	}
+
+	if (ibuf_bitmap_page(zip_size, offset)
+	    || trx_sys_hdr_page(space, offset)) {
+
+		/* If it is an ibuf bitmap page or trx sys hdr, we do
+		no read-ahead, as that could break the ibuf page access
+		order */
+
+		return(0);
+	}
+
+	/* Remember the tablespace version before we ask the tablespace size
+	below: if DISCARD + IMPORT changes the actual .ibd file meanwhile, we
+	do not try to read outside the bounds of the tablespace! */
+
+	tablespace_version = fil_space_get_version(space);
+
+	low  = (offset / buf_read_ahead_random_area)
+		* buf_read_ahead_random_area;
+	high = (offset / buf_read_ahead_random_area + 1)
+		* buf_read_ahead_random_area;
+	if (high > fil_space_get_size(space)) {
+
+		high = fil_space_get_size(space);
+	}
+
+	buf_pool_mutex_enter(buf_pool);
+
+	if (buf_pool->n_pend_reads
+	    > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
+		buf_pool_mutex_exit(buf_pool);
+
+		return(0);
+	}
+
+	/* Count how many blocks in the area have been recently accessed,
+	that is, reside near the start of the LRU list. */
+
+	for (i = low; i < high; i++) {
+		const buf_page_t*	bpage =
+			buf_page_hash_get(buf_pool, space, i);
+
+		if (bpage
+		    && buf_page_is_accessed(bpage)
+		    && buf_page_peek_if_young(bpage)) {
+
+			recent_blocks++;
+
+			if (recent_blocks
+			    >= BUF_READ_AHEAD_RANDOM_THRESHOLD(buf_pool)) {
+
+				buf_pool_mutex_exit(buf_pool);
+				goto read_ahead;
+			}
+		}
+	}
+
+	buf_pool_mutex_exit(buf_pool);
+	/* Do nothing */
+	return(0);
+
+read_ahead:
+	/* Read all the suitable blocks within the area */
+
+	if (inside_ibuf) {
+		ibuf_mode = BUF_READ_IBUF_PAGES_ONLY;
+	} else {
+		ibuf_mode = BUF_READ_ANY_PAGE;
+	}
+
+	count = 0;
+
+	for (i = low; i < high; i++) {
+		/* It is only sensible to do read-ahead in the non-sync aio
+		mode: hence FALSE as the first parameter */
+
+		if (!ibuf_bitmap_page(zip_size, i)) {
+			count += buf_read_page_low(
+				&err, FALSE,
+				ibuf_mode | OS_AIO_SIMULATED_WAKE_LATER,
+				space, zip_size, FALSE,
+				tablespace_version, i);
+			if (err == DB_TABLESPACE_DELETED) {
+				ut_print_timestamp(stderr);
+				fprintf(stderr,
+					"  InnoDB: Warning: in random"
+					" readahead trying to access\n"
+					"InnoDB: tablespace %lu page %lu,\n"
+					"InnoDB: but the tablespace does not"
+					" exist or is just being dropped.\n",
+					(ulong) space, (ulong) i);
+			}
+		}
+	}
+
+	/* In simulated aio we wake the aio handler threads only after
+	queuing all aio requests, in native aio the following call does
+	nothing: */
+
+	os_aio_simulated_wake_handler_threads();
+
+#ifdef UNIV_DEBUG
+	if (buf_debug_prints && (count > 0)) {
+		fprintf(stderr,
+			"Random read-ahead space %lu offset %lu pages %lu\n",
+			(ulong) space, (ulong) offset,
+			(ulong) count);
+	}
+#endif /* UNIV_DEBUG */
+
+	/* Read ahead is considered one I/O operation for the purpose of
+	LRU policy decision. */
+	buf_LRU_stat_inc_io();
+
+	buf_pool->stat.n_ra_pages_read_rnd += count;
+	srv_buf_pool_reads += count;
+	return(count);
+}
+
+/********************************************************************//**
 High-level function which reads a page asynchronously from a file to the
 buffer buf_pool if it is not already there. Sets the io_fix flag and sets
 an exclusive lock on the buffer frame. The flag is cleared and the x-lock
@@ -257,7 +424,7 @@ buf_read_ahead_linear(
 	ulint		err;
 	ulint		i;
 	const ulint	buf_read_ahead_linear_area
-		= BUF_READ_AHEAD_LINEAR_AREA(buf_pool);
+		= BUF_READ_AHEAD_AREA(buf_pool);
 	ulint		threshold;
 
 	if (UNIV_UNLIKELY(srv_startup_is_before_trx_rollback_phase)) {
```
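For orientation, the core of the new buf_read_ahead_random() is: align the requested page down to its read-ahead area, clamp the area to the tablespace size, count how many pages of that area are already resident and recently accessed, and only when that count reaches BUF_READ_AHEAD_RANDOM_THRESHOLD (5 + area/8) issue asynchronous reads for the whole area. The following self-contained C sketch mirrors only that decision logic; the 64-page AREA, the function would_read_ahead() and all parameter values are illustrative assumptions, not the real InnoDB macros or API.

```c
#include <stdio.h>

/* Stand-ins for the real values; BUF_READ_AHEAD_AREA(b) is derived from the
   buffer pool size in InnoDB, 64 pages here is just an example. */
#define AREA                        64
#define READ_AHEAD_RANDOM_THRESHOLD (5 + AREA / 8)   /* = 13 for a 64-page area */

/* Sketch of the decision only: given how many pages of the area around
   "offset" were recently accessed, would random read-ahead fire, and
   which page range [low, high) would it read? */
static int
would_read_ahead(unsigned long offset, unsigned long space_size,
                 unsigned long recent_blocks,
                 unsigned long *low, unsigned long *high)
{
	*low  = (offset / AREA) * AREA;		/* area-aligned start */
	*high = (offset / AREA + 1) * AREA;	/* exclusive end */
	if (*high > space_size) {
		*high = space_size;		/* clamp to tablespace size */
	}
	return recent_blocks >= READ_AHEAD_RANDOM_THRESHOLD;
}

int main(void)
{
	unsigned long low, high;

	/* Suppose 20 of the 64 pages around page 1000 were accessed recently. */
	if (would_read_ahead(1000, 100000, 20, &low, &high)) {
		printf("read ahead pages [%lu, %lu)\n", low, high);
	} else {
		printf("not enough recent accesses; do nothing\n");
	}
	return 0;
}
```

In the patch itself the recent_blocks count comes from walking the buffer pool page hash under buf_pool->mutex and testing buf_page_is_accessed() and buf_page_peek_if_young() for each page of the area, so the check is against pages that are both resident and near the young end of the LRU list.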