diff options
author | Eugene Kosov <eugene.kosov@mariadb.com> | 2020-05-19 14:19:20 +0300 |
---|---|---|
committer | Eugene Kosov <claprix@yandex.ru> | 2021-03-03 13:27:52 +0300 |
commit | d5b3efb65e0afbc0461f08cee35a1ac5ff13af0f (patch) | |
tree | 463f475feb2e8801753d4a9b2a3b9845d4d735b4 | |
parent | 01b44c054d608c7a7c3ee751a14782558d06275f (diff) | |
download | mariadb-git-bb-10.5-for-axel.tar.gz |
MDEV-21212 buf_page_get_gen -> buf_pool->stat.n_page_gets++ is a cpu waste (0.5-1%)bb-10.5-for-axel
This patch is inspired by wg21.link/P0261
Fix by introducing yet another distributed counter. This one uses the
C++11 thread_local specifier to achieve perfect scalability.
In the following benchmark, ShardedAtomic is the 64-shard counter
that we currently use.
Single-thread mode is faster because of the weaker atomic semantics of the new
counter. Strong semantics are not needed because only one writer exists for
each counter.
Running ./counter-benchmark
Run on (128 X 200 MHz CPU s)
CPU Caches:
L1 Data 64 KiB (x128)
L1 Instruction 64 KiB (x128)
L2 Unified 512 KiB (x128)
L3 Unified 65536 KiB (x4)
Load Average: 0.00, 0.13, 0.22
------------------------------------------------------------------------
Benchmark Time CPU Iterations
------------------------------------------------------------------------
BM_SimpleAtomic/threads:1 6.16 ns 6.16 ns 113705842
BM_SimpleAtomic/threads:2 8.17 ns 16.3 ns 42821142
BM_SimpleAtomic/threads:4 21.6 ns 86.5 ns 12652320
BM_SimpleAtomic/threads:8 33.3 ns 266 ns 2148016
BM_SimpleAtomic/threads:16 133 ns 2130 ns 487520
BM_SimpleAtomic/threads:32 124 ns 3952 ns 217344
BM_SimpleAtomic/threads:64 157 ns 10026 ns 64000
BM_SimpleAtomic/threads:128 94.6 ns 12102 ns 128000
BM_SimpleAtomic/threads:256 60.4 ns 15446 ns 256000
BM_SimpleAtomic/threads:512 26.8 ns 13714 ns 512000
BM_SimpleAtomic/threads:1024 14.5 ns 14889 ns 102400
BM_SimpleAtomic/threads:2048 5.65 ns 11566 ns 204800
BM_ShardedAtomic/threads:1 6.16 ns 6.16 ns 113679824
BM_ShardedAtomic/threads:2 3.08 ns 6.16 ns 113721534
BM_ShardedAtomic/threads:4 1.54 ns 6.16 ns 113711620
BM_ShardedAtomic/threads:8 0.769 ns 6.16 ns 113712616
BM_ShardedAtomic/threads:16 0.385 ns 6.16 ns 113628336
BM_ShardedAtomic/threads:32 0.192 ns 6.16 ns 113594432
BM_ShardedAtomic/threads:64 0.228 ns 14.6 ns 52447616
BM_ShardedAtomic/threads:128 0.195 ns 25.0 ns 30016768
BM_ShardedAtomic/threads:256 0.179 ns 45.8 ns 25600000
BM_ShardedAtomic/threads:512 0.122 ns 62.6 ns 58018304
BM_ShardedAtomic/threads:1024 0.073 ns 74.9 ns 10240000
BM_ShardedAtomic/threads:2048 0.054 ns 110 ns 20480000
BM_TlsCounter/threads:1 2.71 ns 2.71 ns 259471815
BM_TlsCounter/threads:2 1.35 ns 2.69 ns 255314508
BM_TlsCounter/threads:4 0.674 ns 2.70 ns 259838580
BM_TlsCounter/threads:8 0.337 ns 2.69 ns 259870616
BM_TlsCounter/threads:16 0.169 ns 2.71 ns 257862656
BM_TlsCounter/threads:32 0.084 ns 2.70 ns 259839840
BM_TlsCounter/threads:64 0.043 ns 2.72 ns 257231104
BM_TlsCounter/threads:128 0.021 ns 2.70 ns 258558848
BM_TlsCounter/threads:256 0.011 ns 2.70 ns 257301248
BM_TlsCounter/threads:512 0.005 ns 2.71 ns 257305088
BM_TlsCounter/threads:1024 0.003 ns 2.72 ns 246151168
BM_TlsCounter/threads:2048 0.001 ns 2.74 ns 235972608
Several counters were converted to the new one because I suspect
they could be slow.
-rw-r--r-- | include/distributable_counter.h | 237 | ||||
-rw-r--r-- | storage/innobase/btr/btr0sea.cc | 4 | ||||
-rw-r--r-- | storage/innobase/buf/buf0buf.cc | 17 | ||||
-rw-r--r-- | storage/innobase/handler/ha_innodb.cc | 6 | ||||
-rw-r--r-- | storage/innobase/include/buf0buf.h | 9 | ||||
-rw-r--r-- | storage/innobase/include/srv0srv.h | 35 | ||||
-rw-r--r-- | storage/innobase/row/row0mysql.cc | 13 | ||||
-rw-r--r-- | storage/innobase/srv/srv0mon.cc | 10 | ||||
-rw-r--r-- | storage/innobase/srv/srv0srv.cc | 54 |
9 files changed, 311 insertions, 74 deletions
diff --git a/include/distributable_counter.h b/include/distributable_counter.h new file mode 100644 index 00000000000..ff37fb5c5d3 --- /dev/null +++ b/include/distributable_counter.h @@ -0,0 +1,237 @@ +/***************************************************************************** + +Copyright (c) 2021 MariaDB Corporation. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +#ifndef DISTRIBUTABLE_COUNTER_H +#define DISTRIBUTABLE_COUNTER_H + +#include <cassert> +#include <cstddef> + +#include <array> +#include <atomic> +#include <mutex> + +#include "ilist.h" +#include "my_attribute.h" + +namespace detail +{ + +template <typename Integral> class strong_bumper +{ +public: + strong_bumper(std::atomic<Integral> &value) noexcept : value_(value) {} + + void operator+=(Integral amount) noexcept { value_.fetch_add(amount); } + void operator-=(Integral amount) noexcept { value_.fetch_sub(amount); } + void operator++() noexcept { *this+= 1; } + void operator++(int) noexcept { *this+= 1; } + void operator--() noexcept { *this-= 1; } + void operator--(int) noexcept { *this-= 1; } + +private: + std::atomic<Integral> &value_; +}; + +template <typename Integral> class weak_bumper +{ +public: + weak_bumper(std::atomic<Integral> &value) noexcept : value_(value) {} + + void operator+=(Integral amount) noexcept + { + 
value_.store(value_.load(std::memory_order_relaxed) + amount, + std::memory_order_relaxed); + } + void operator-=(Integral amount) noexcept + { + value_.store(value_.load(std::memory_order_relaxed) - amount, + std::memory_order_relaxed); + } + void operator++() noexcept { *this+= 1; } + void operator++(int) noexcept { *this+= 1; } + void operator--() noexcept { *this-= 1; } + void operator--(int) noexcept { *this-= 1; } + +private: + std::atomic<Integral> &value_; +}; + +}; // namespace detail + +template <typename Integral, size_t Size> class counter_broker_array; + +template <typename Integral, size_t Size> class distributable_counter_array +{ + distributable_counter_array(const distributable_counter_array &)= delete; + distributable_counter_array & + operator=(const distributable_counter_array &)= delete; + distributable_counter_array(distributable_counter_array &&)= delete; + distributable_counter_array & + operator=(distributable_counter_array &&)= delete; + +public: + using size_type= std::size_t; + + distributable_counter_array() noexcept + { + for (auto &counter : counters_) + counter.store(0, std::memory_order_relaxed); + } + + __attribute__((warn_unused_result)) detail::strong_bumper<Integral> + operator[](size_type idx) noexcept + { + assert(idx < size()); + return detail::strong_bumper<Integral>(counters_[idx]); + } + + __attribute__((warn_unused_result)) size_type size() const noexcept + { + return counters_.size(); + } + __attribute__((warn_unused_result)) Integral load(size_type idx); + Integral exchange(size_type idx, Integral to); + +private: + std::array<std::atomic<Integral>, Size> counters_; + ilist<counter_broker_array<Integral, Size>> brokers_; // guarded by mutex_ + std::mutex mutex_; + + friend counter_broker_array<Integral, Size>; +}; + +template <typename Integral, size_t Size> +class counter_broker_array : public ilist_node<> +{ + counter_broker_array(const counter_broker_array &)= delete; + counter_broker_array &operator=(const 
counter_broker_array &)= delete; + counter_broker_array(counter_broker_array &&)= delete; + counter_broker_array &operator=(counter_broker_array &&)= delete; + +public: + using size_type= std::size_t; + + counter_broker_array(distributable_counter_array<Integral, Size> &array) + : base_(array) + { + for (auto &counter : counters_) + counter.store(0, std::memory_order_relaxed); + + std::lock_guard<std::mutex> _(array.mutex_); + array.brokers_.push_back(*this); + } + + ~counter_broker_array() noexcept + { + // A reader of a distributable_counter_array may access this object while + // it's being destroyed. To prevent a double sum of a counter we use an + // exchange() here. + for (size_type i= 0; i < size(); i++) + base_[i]+= counters_[i].exchange(0, std::memory_order_relaxed); + + std::lock_guard<std::mutex> _(base_.mutex_); + base_.brokers_.remove(*this); + } + + __attribute__((warn_unused_result)) detail::weak_bumper<Integral> + operator[](size_type idx) noexcept + { + assert(idx < size()); + return detail::weak_bumper<Integral>(counters_[idx]); + } + + __attribute__((warn_unused_result)) size_type size() const noexcept + { + return counters_.size(); + } + +private: + std::array<std::atomic<Integral>, Size> counters_; + distributable_counter_array<Integral, Size> &base_; + + friend class distributable_counter_array<Integral, Size>; +}; + +template <typename Integral, size_t Size> +Integral distributable_counter_array<Integral, Size>::load(size_type idx) +{ + assert(idx < size()); + + Integral accumulator= 0; + { + std::lock_guard<std::mutex> _(mutex_); + for (const auto &broker : brokers_) + accumulator+= broker.counters_[idx].load(std::memory_order_relaxed); + } + return accumulator + counters_[idx].load(std::memory_order_relaxed); +} + +template <typename Integral, size_t Size> +Integral distributable_counter_array<Integral, Size>::exchange(size_type idx, + Integral to) +{ + assert(idx < size()); + + Integral accumulator= 0; + { + std::lock_guard<std::mutex> 
_(mutex_); + for (const auto &broker : brokers_) + accumulator+= + broker.counters_[idx].exchange(0, std::memory_order_relaxed); + } + + return accumulator + counters_[idx].exchange(to, std::memory_order_relaxed); +} + +// Uses TLS to automatically distribute counter over any number of threads. +// Writing is a weakly atomical increment. +// Reading of values is O(N) where N is a number of threads. +// Thus, counter is optimized for writing and pessimized for reading. +template <typename Integral, size_t Size> class singleton_counter_array +{ +public: + __attribute__((warn_unused_result)) detail::weak_bumper<Integral> + operator[](size_t idx) + { + assert(idx < Size); + return local()[idx]; + } + + __attribute__((warn_unused_result)) Integral load(size_t idx) + { + return global_.load(idx); + } + Integral exchange(size_t idx, Integral to) + { + return global_.exchange(idx, to); + } + +private: + counter_broker_array<Integral, Size> &local() + { + // Meyers' singleton ensures that the broker will be initialized on the + // first access and thus will not slow down thread creation. 
+ thread_local counter_broker_array<Integral, Size> broker(global_); + return broker; + } + + distributable_counter_array<Integral, Size> global_; +}; + +#endif diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc index 5c8f97b7bcf..b63fe85054f 100644 --- a/storage/innobase/btr/btr0sea.cc +++ b/storage/innobase/btr/btr0sea.cc @@ -1131,7 +1131,7 @@ got_no_latch: } mtr->memo_push(block, fix_type); - buf_pool.stat.n_page_gets++; + COUNTER(N_PAGE_GETS)++; rw_lock_s_unlock(&part->latch); @@ -1221,7 +1221,7 @@ fail_and_release_page: #endif /* Increment the page get statistics though we did not really fix the page: for user info only */ - ++buf_pool.stat.n_page_gets; + ++COUNTER(N_PAGE_GETS); if (!ahi_latch) { buf_page_make_young_if_needed(&block->page); diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index d531be43a8b..066045afeb4 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -2506,7 +2506,7 @@ void buf_page_free(fil_space_t *space, uint32_t page, mtr_t *mtr, ) mtr->add_freed_offset(space, page); - buf_pool.stat.n_page_gets++; + COUNTER(N_PAGE_GETS)++; const page_id_t page_id(space->id, page); const ulint fold= page_id.fold(); page_hash_latch *hash_lock= buf_pool.page_hash.lock<false>(fold); @@ -2547,7 +2547,7 @@ buf_page_t* buf_page_get_zip(const page_id_t page_id, ulint zip_size) { ut_ad(zip_size); ut_ad(ut_is_2pow(zip_size)); - buf_pool.stat.n_page_gets++; + COUNTER(N_PAGE_GETS)++; bool discard_attempted= false; const ulint fold= page_id.fold(); @@ -2946,7 +2946,7 @@ buf_page_get_low( ut_ad(!mtr || !ibuf_inside(mtr) || ibuf_page_low(page_id, zip_size, FALSE, file, line, NULL)); - buf_pool.stat.n_page_gets++; + COUNTER(N_PAGE_GETS)++; loop: buf_block_t* fix_block; block = guess; @@ -3567,7 +3567,7 @@ buf_page_optimistic_get( ut_ad(block->page.buf_fix_count()); ut_ad(block->page.state() == BUF_BLOCK_FILE_PAGE); - buf_pool.stat.n_page_gets++; + COUNTER(N_PAGE_GETS)++; 
return(TRUE); } @@ -3629,7 +3629,7 @@ buf_page_try_get_func( ut_ad(bpage->id() == page_id); buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK); - buf_pool.stat.n_page_gets++; + COUNTER(N_PAGE_GETS)++; return block; } @@ -4265,6 +4265,7 @@ void buf_refresh_io_stats() { buf_pool.last_printout_time = time(NULL); buf_pool.old_stat = buf_pool.stat; + buf_pool.old_n_page_gets = COUNTER_LOAD(N_PAGE_GETS); } /** Invalidate all pages in the buffer pool. @@ -4571,7 +4572,7 @@ void buf_stats_get_pool_info(buf_pool_info_t *pool_info) pool_info->n_pages_written = buf_pool.stat.n_pages_written; - pool_info->n_page_gets = buf_pool.stat.n_page_gets; + pool_info->n_page_gets = COUNTER_LOAD(N_PAGE_GETS); pool_info->n_ra_pages_read_rnd = buf_pool.stat.n_ra_pages_read_rnd; pool_info->n_ra_pages_read = buf_pool.stat.n_ra_pages_read; @@ -4603,8 +4604,8 @@ void buf_stats_get_pool_info(buf_pool_info_t *pool_info) - buf_pool.old_stat.n_pages_written) / time_elapsed; - pool_info->n_page_get_delta = buf_pool.stat.n_page_gets - - buf_pool.old_stat.n_page_gets; + pool_info->n_page_get_delta + = COUNTER_LOAD(N_PAGE_GETS) - buf_pool.old_n_page_gets; if (pool_info->n_page_get_delta) { pool_info->page_read_delta = buf_pool.stat.n_pages_read diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index f870cf7793b..a58831e65d3 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -8785,8 +8785,7 @@ ha_innobase::index_read( srv_stats.n_system_rows_read.add( thd_get_thread_id(m_prebuilt->trx->mysql_thd), 1); } else { - srv_stats.n_rows_read.add( - thd_get_thread_id(m_prebuilt->trx->mysql_thd), 1); + COUNTER(N_ROWS_READ)++; } break; @@ -9038,8 +9037,7 @@ ha_innobase::general_fetch( srv_stats.n_system_rows_read.add( thd_get_thread_id(trx->mysql_thd), 1); } else { - srv_stats.n_rows_read.add( - thd_get_thread_id(trx->mysql_thd), 1); + COUNTER(N_ROWS_READ)++; } break; case DB_RECORD_NOT_FOUND: diff --git 
a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index 12fb139f1bf..6c2833762b8 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -111,7 +111,7 @@ struct buf_pool_info_t ulint n_pages_read; /*!< buf_pool.n_pages_read */ ulint n_pages_created; /*!< buf_pool.n_pages_created */ ulint n_pages_written; /*!< buf_pool.n_pages_written */ - ulint n_page_gets; /*!< buf_pool.n_page_gets */ + ulint n_page_gets; /*!< COUNTER(N_PAGE_GETS) */ ulint n_ra_pages_read_rnd; /*!< buf_pool.n_ra_pages_read_rnd, number of pages readahead */ ulint n_ra_pages_read; /*!< buf_pool.n_ra_pages_read, number @@ -1332,12 +1332,6 @@ struct buf_buddy_free_t { /** @brief The buffer pool statistics structure. */ struct buf_pool_stat_t{ - ulint n_page_gets; /*!< number of page gets performed; - also successful searches through - the adaptive hash index are - counted as page gets; this field - is NOT protected by the buffer - pool mutex */ ulint n_pages_read; /*!< number read operations */ ulint n_pages_written;/*!< number write operations */ ulint n_pages_created;/*!< number of pages created @@ -1930,6 +1924,7 @@ public: indexed by block size */ buf_pool_stat_t stat; /*!< current statistics */ buf_pool_stat_t old_stat; /*!< old statistics */ + size_t old_n_page_gets= 0; /* @} */ diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index 7dcbf2cd1cb..1714ec709d0 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -48,9 +48,20 @@ Created 10/10/1995 Heikki Tuuri #include "mysql/psi/mysql_stage.h" #include "mysql/psi/psi.h" +#include "distributable_counter.h" #include <tpool.h> #include <memory> +enum class counters_t : size_t +{ + N_ROWS_READ, + N_ROWS_UPDATED, + N_ROWS_DELETED, + N_ROWS_INSERTED, + N_PAGE_GETS, + SIZE, +}; + /** Global counters used inside InnoDB. 
*/ struct srv_stats_t { @@ -132,18 +143,6 @@ struct srv_stats_t MY_ALIGNED(CACHE_LINE_SIZE) Atomic_counter<ulint> n_lock_wait_current_count; - /** Number of rows read. */ - ulint_ctr_64_t n_rows_read; - - /** Number of rows updated */ - ulint_ctr_64_t n_rows_updated; - - /** Number of rows deleted */ - ulint_ctr_64_t n_rows_deleted; - - /** Number of rows inserted */ - ulint_ctr_64_t n_rows_inserted; - /** Number of system rows read. */ ulint_ctr_64_t n_system_rows_read; @@ -480,6 +479,16 @@ extern struct export_var_t export_vars; /** Global counters */ extern srv_stats_t srv_stats; +/** Global singleton counters */ +extern singleton_counter_array<size_t, + static_cast<size_t>(counters_t::SIZE)> + counter_array; + +#define COUNTER(IDX) counter_array[static_cast<size_t>(counters_t::IDX)] + +#define COUNTER_LOAD(IDX) \ + counter_array.load(static_cast<size_t>(counters_t::IDX)) + /** Simulate compression failures. */ extern uint srv_simulate_comp_failures; @@ -709,7 +718,7 @@ struct export_var_t{ ulint innodb_buffer_pool_pages_made_not_young; ulint innodb_buffer_pool_pages_made_young; ulint innodb_buffer_pool_pages_old; - ulint innodb_buffer_pool_read_requests; /*!< buf_pool.stat.n_page_gets */ + ulint innodb_buffer_pool_read_requests; /*!< COUNTER(N_PAGE_GETS) */ ulint innodb_buffer_pool_reads; /*!< srv_buf_pool_reads */ ulint innodb_buffer_pool_write_requests;/*!< srv_stats.buf_pool_write_requests */ ulint innodb_buffer_pool_read_ahead_rnd;/*!< srv_read_ahead_rnd */ diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 096407611d8..67d3d869d9a 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -1519,7 +1519,7 @@ error_exit: if (table->is_system_db) { srv_stats.n_system_rows_inserted.inc(size_t(trx->id)); } else { - srv_stats.n_rows_inserted.inc(size_t(trx->id)); + COUNTER(N_ROWS_INSERTED)++; } /* Not protected by dict_sys.mutex for performance @@ -1901,7 +1901,7 @@ 
row_update_for_mysql(row_prebuilt_t* prebuilt) if (table->is_system_db) { srv_stats.n_system_rows_deleted.inc(size_t(trx->id)); } else { - srv_stats.n_rows_deleted.inc(size_t(trx->id)); + COUNTER(N_ROWS_DELETED)++; } update_statistics = !srv_stats_include_delete_marked; @@ -1909,7 +1909,7 @@ row_update_for_mysql(row_prebuilt_t* prebuilt) if (table->is_system_db) { srv_stats.n_system_rows_updated.inc(size_t(trx->id)); } else { - srv_stats.n_rows_updated.inc(size_t(trx->id)); + COUNTER(N_ROWS_UPDATED)++; } update_statistics @@ -2206,8 +2206,7 @@ static dberr_t row_update_vers_insert(que_thr_t* thr, upd_node_t* node) goto exit; case DB_SUCCESS: - srv_stats.n_rows_inserted.inc( - static_cast<size_t>(trx->id)); + COUNTER(N_ROWS_INSERTED)++; dict_stats_update_if_needed(table, *trx); goto exit; } @@ -2293,11 +2292,11 @@ row_update_cascade_for_mysql( dict_table_n_rows_dec(node->table); stats = !srv_stats_include_delete_marked; - srv_stats.n_rows_deleted.inc(size_t(trx->id)); + COUNTER(N_ROWS_DELETED)++; } else { stats = !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE); - srv_stats.n_rows_updated.inc(size_t(trx->id)); + COUNTER(N_ROWS_UPDATED)++; } if (stats) { diff --git a/storage/innobase/srv/srv0mon.cc b/storage/innobase/srv/srv0mon.cc index f13af13c8e3..3f7999fc7f2 100644 --- a/storage/innobase/srv/srv0mon.cc +++ b/storage/innobase/srv/srv0mon.cc @@ -1554,7 +1554,7 @@ srv_mon_process_existing_counter( /* innodb_buffer_pool_read_requests, the number of logical read requests */ case MONITOR_OVLD_BUF_POOL_READ_REQUESTS: - value = buf_pool.stat.n_page_gets; + value = COUNTER_LOAD(N_PAGE_GETS); break; /* innodb_buffer_pool_write_requests, the number of @@ -1779,22 +1779,22 @@ srv_mon_process_existing_counter( /* innodb_rows_read */ case MONITOR_OLVD_ROW_READ: - value = srv_stats.n_rows_read; + value = COUNTER_LOAD(N_ROWS_READ); break; /* innodb_rows_inserted */ case MONITOR_OLVD_ROW_INSERTED: - value = srv_stats.n_rows_inserted; + value = COUNTER_LOAD(N_ROWS_INSERTED); break; /* 
innodb_rows_deleted */ case MONITOR_OLVD_ROW_DELETED: - value = srv_stats.n_rows_deleted; + value = COUNTER_LOAD(N_ROWS_DELETED); break; /* innodb_rows_updated */ case MONITOR_OLVD_ROW_UPDTATED: - value = srv_stats.n_rows_updated; + value = COUNTER_LOAD(N_ROWS_UPDATED); break; /* innodb_system_rows_read */ diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index d1f34adeeae..1d8a3ce6bb2 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -285,6 +285,9 @@ srv_stats_t srv_stats; /* structure to pass status variables to MySQL */ export_var_t export_vars; +singleton_counter_array<size_t, static_cast<size_t>(counters_t::SIZE)> + counter_array; + /** Normally 0. When nonzero, skip some phases of crash recovery, starting from SRV_FORCE_IGNORE_CORRUPT, so that data can be recovered by SELECT or mysqldump. When this is nonzero, we do not allow any user @@ -756,10 +759,10 @@ static void srv_refresh_innodb_monitor_stats(time_t current_time) buf_refresh_io_stats(); - srv_n_rows_inserted_old = srv_stats.n_rows_inserted; - srv_n_rows_updated_old = srv_stats.n_rows_updated; - srv_n_rows_deleted_old = srv_stats.n_rows_deleted; - srv_n_rows_read_old = srv_stats.n_rows_read; + srv_n_rows_inserted_old = COUNTER_LOAD(N_ROWS_INSERTED); + srv_n_rows_updated_old = COUNTER_LOAD(N_ROWS_UPDATED); + srv_n_rows_deleted_old = COUNTER_LOAD(N_ROWS_DELETED); + srv_n_rows_read_old = COUNTER_LOAD(N_ROWS_READ); srv_n_system_rows_inserted_old = srv_stats.n_system_rows_inserted; srv_n_system_rows_updated_old = srv_stats.n_system_rows_updated; @@ -943,33 +946,28 @@ srv_printf_innodb_monitor( srv_main_thread_id, srv_main_thread_op_info); fprintf(file, - "Number of rows inserted " ULINTPF - ", updated " ULINTPF - ", deleted " ULINTPF - ", read " ULINTPF "\n", - (ulint) srv_stats.n_rows_inserted, - (ulint) srv_stats.n_rows_updated, - (ulint) srv_stats.n_rows_deleted, - (ulint) srv_stats.n_rows_read); + "Number of rows inserted %zu, updated %zu, 
deleted %zu, read " + "%zu\n", + COUNTER_LOAD(N_ROWS_INSERTED), COUNTER_LOAD(N_ROWS_UPDATED), + COUNTER_LOAD(N_ROWS_DELETED), COUNTER_LOAD(N_ROWS_READ)); fprintf(file, "%.2f inserts/s, %.2f updates/s," " %.2f deletes/s, %.2f reads/s\n", - static_cast<double>(srv_stats.n_rows_inserted + static_cast<double>(COUNTER_LOAD(N_ROWS_INSERTED) - srv_n_rows_inserted_old) / time_elapsed, - static_cast<double>(srv_stats.n_rows_updated + static_cast<double>(COUNTER_LOAD(N_ROWS_UPDATED) - srv_n_rows_updated_old) / time_elapsed, - static_cast<double>(srv_stats.n_rows_deleted + static_cast<double>(COUNTER_LOAD(N_ROWS_DELETED) - srv_n_rows_deleted_old) / time_elapsed, - static_cast<double>(srv_stats.n_rows_read + static_cast<double>(COUNTER_LOAD(N_ROWS_READ) - srv_n_rows_read_old) / time_elapsed); fprintf(file, - "Number of system rows inserted " ULINTPF - ", updated " ULINTPF ", deleted " ULINTPF - ", read " ULINTPF "\n", + "Number of system rows inserted %zu, updated %zu, deleted " + "%zu, read %zu\n", (ulint) srv_stats.n_system_rows_inserted, (ulint) srv_stats.n_system_rows_updated, (ulint) srv_stats.n_system_rows_deleted, @@ -989,10 +987,10 @@ srv_printf_innodb_monitor( static_cast<double>(srv_stats.n_system_rows_read - srv_n_system_rows_read_old) / time_elapsed); - srv_n_rows_inserted_old = srv_stats.n_rows_inserted; - srv_n_rows_updated_old = srv_stats.n_rows_updated; - srv_n_rows_deleted_old = srv_stats.n_rows_deleted; - srv_n_rows_read_old = srv_stats.n_rows_read; + srv_n_rows_inserted_old = COUNTER_LOAD(N_ROWS_INSERTED); + srv_n_rows_updated_old = COUNTER_LOAD(N_ROWS_UPDATED); + srv_n_rows_deleted_old = COUNTER_LOAD(N_ROWS_DELETED); + srv_n_rows_read_old = COUNTER_LOAD(N_ROWS_READ); srv_n_system_rows_inserted_old = srv_stats.n_system_rows_inserted; srv_n_system_rows_updated_old = srv_stats.n_system_rows_updated; srv_n_system_rows_deleted_old = srv_stats.n_system_rows_deleted; @@ -1070,7 +1068,7 @@ srv_export_innodb_status(void) export_vars.innodb_data_written = 
srv_stats.data_written + dblwr; export_vars.innodb_buffer_pool_read_requests - = buf_pool.stat.n_page_gets; + = COUNTER_LOAD(N_PAGE_GETS); export_vars.innodb_buffer_pool_write_requests = srv_stats.buf_pool_write_requests; @@ -1166,13 +1164,13 @@ srv_export_innodb_status(void) export_vars.innodb_row_lock_time_max = lock_sys.n_lock_max_wait_time / 1000; - export_vars.innodb_rows_read = srv_stats.n_rows_read; + export_vars.innodb_rows_read = COUNTER_LOAD(N_ROWS_READ); - export_vars.innodb_rows_inserted = srv_stats.n_rows_inserted; + export_vars.innodb_rows_inserted = COUNTER_LOAD(N_ROWS_INSERTED); - export_vars.innodb_rows_updated = srv_stats.n_rows_updated; + export_vars.innodb_rows_updated = COUNTER_LOAD(N_ROWS_UPDATED); - export_vars.innodb_rows_deleted = srv_stats.n_rows_deleted; + export_vars.innodb_rows_deleted = COUNTER_LOAD(N_ROWS_DELETED); export_vars.innodb_system_rows_read = srv_stats.n_system_rows_read; |