summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMarko Mäkelä <marko.makela@mariadb.com>2020-12-08 14:57:51 +0200
committerMarko Mäkelä <marko.makela@mariadb.com>2020-12-08 19:07:36 +0200
commitf0c295e2de8c074c2ca72e19ff06e1d0e3ee6d2b (patch)
tree51b5a00fc982f687982af07f770de9b588cbc9c9
parent6859e80df7b7307c1c1f140867d6b54ec472e9ac (diff)
downloadmariadb-git-f0c295e2de8c074c2ca72e19ff06e1d0e3ee6d2b.tar.gz
MDEV-24369 Page cleaner sleeps despite innodb_max_dirty_pages_pct_lwm being exceeded
MDEV-24278 improved the page cleaner so that it will no longer wake up once per second on an idle server. However, with innodb_adaptive_flushing (the default) the function page_cleaner_flush_pages_recommendation() could initially return 0 even if there is work to do. af_get_pct_for_dirty(): Remove. Based on a comment here, it appears that an initial intention of innodb_max_dirty_pages_pct_lwm=0.0 (the default value) was to disable something. That ceased to hold in MDEV-23855: the value is a pure threshold; the page cleaner will not perform any work unless the threshold is exceeded. page_cleaner_flush_pages_recommendation(): Add the parameter dirty_blocks to ensure that buf_pool.flush_list will eventually be emptied.
-rw-r--r--storage/innobase/buf/buf0flu.cc50
1 file changed, 18 insertions, 32 deletions
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index bdb1f3404a1..7db8abd2285 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -1851,33 +1851,6 @@ ATTRIBUTE_COLD static void buf_flush_sync_for_checkpoint(lsn_t lsn)
}
/*********************************************************************//**
-Calculates if flushing is required based on number of dirty pages in
-the buffer pool.
-@param dirty_pct 100*flush_list.count / (LRU.count + free.count)
-@return percent of io_capacity to flush to manage dirty page ratio */
-static ulint af_get_pct_for_dirty(double dirty_pct)
-{
- ut_ad(srv_max_dirty_pages_pct_lwm <= srv_max_buf_pool_modified_pct);
-
- if (srv_max_dirty_pages_pct_lwm == 0) {
- /* The user has not set the option to preflush dirty
- pages as we approach the high water mark. */
- if (dirty_pct >= srv_max_buf_pool_modified_pct) {
- /* We have crossed the high water mark of dirty
- pages In this case we start flushing at 100% of
- innodb_io_capacity. */
- return(100);
- }
- } else {
- /* We should start flushing pages gradually. */
- return(static_cast<ulint>((dirty_pct * 100)
- / (srv_max_buf_pool_modified_pct + 1)));
- }
-
- return(0);
-}
-
-/*********************************************************************//**
Calculates if flushing is required based on redo generation rate.
@return percent of io_capacity to flush to manage redo space */
static
@@ -1911,9 +1884,11 @@ Based on various factors it decides if there is a need to do flushing.
@return number of pages recommended to be flushed
@param last_pages_in number of pages flushed in previous batch
@param oldest_lsn buf_pool.get_oldest_modification(0)
+@param dirty_blocks UT_LIST_GET_LEN(buf_pool.flush_list)
@param dirty_pct 100*flush_list.count / (LRU.count + free.count) */
static ulint page_cleaner_flush_pages_recommendation(ulint last_pages_in,
lsn_t oldest_lsn,
+ ulint dirty_blocks,
double dirty_pct)
{
static lsn_t prev_lsn = 0;
@@ -1925,16 +1900,24 @@ static ulint page_cleaner_flush_pages_recommendation(ulint last_pages_in,
ulint n_pages = 0;
const lsn_t cur_lsn = log_sys.get_lsn();
- ulint pct_for_dirty = af_get_pct_for_dirty(dirty_pct);
ut_ad(oldest_lsn <= cur_lsn);
ulint pct_for_lsn = af_get_pct_for_lsn(cur_lsn - oldest_lsn);
time_t curr_time = time(nullptr);
+ const double max_pct = srv_max_buf_pool_modified_pct;
if (!prev_lsn || !pct_for_lsn) {
prev_time = curr_time;
prev_lsn = cur_lsn;
- return ulint(double(pct_for_dirty) / 100.0
- * double(srv_io_capacity));
+ if (max_pct > 0.0) {
+ dirty_pct /= max_pct;
+ }
+
+ n_pages = ulint(dirty_pct * double(srv_io_capacity));
+ if (n_pages < dirty_blocks) {
+ n_pages= std::min<ulint>(srv_io_capacity, dirty_blocks);
+ }
+
+ return n_pages;
}
sum_pages += last_pages_in;
@@ -1983,8 +1966,8 @@ static ulint page_cleaner_flush_pages_recommendation(ulint last_pages_in,
sum_pages = 0;
}
- mysql_mutex_lock(&buf_pool.flush_list_mutex);
-
+ const ulint pct_for_dirty = static_cast<ulint>
+ (max_pct > 0.0 ? dirty_pct / max_pct : dirty_pct);
ulint pct_total = std::max(pct_for_dirty, pct_for_lsn);
/* Estimate pages to be flushed for the lsn progress */
@@ -1992,6 +1975,8 @@ static ulint page_cleaner_flush_pages_recommendation(ulint last_pages_in,
+ lsn_avg_rate * buf_flush_lsn_scan_factor;
ulint pages_for_lsn = 0;
+ mysql_mutex_lock(&buf_pool.flush_list_mutex);
+
for (buf_page_t* b = UT_LIST_GET_LAST(buf_pool.flush_list);
b != NULL;
b = UT_LIST_GET_PREV(list, b)) {
@@ -2173,6 +2158,7 @@ do_checkpoint:
}
else if (ulint n= page_cleaner_flush_pages_recommendation(last_pages,
oldest_lsn,
+ dirty_blocks,
dirty_pct))
{
page_cleaner.flush_pass++;