author    | Marko Mäkelä <marko.makela@mariadb.com> | 2020-05-18 14:49:44 +0300
committer | Marko Mäkelä <marko.makela@mariadb.com> | 2020-05-18 15:02:55 +0300
commit    | 386f168ab340791631e4d8979c4370ecef7e6b05 (patch)
tree      | 6dd67b95b02ef42b7d76b138f31554a7cc433a9e /storage/innobase
parent    | fde94b4cd6c916f118ccb2785c09dafef391298c (diff)
download  | mariadb-git-386f168ab340791631e4d8979c4370ecef7e6b05.tar.gz
MDEV-22456 after-merge fix: introduce Atomic_relaxed
In the merge 9e6e43551fc61bc34152f8d60f5d72f0d3814787
we made Atomic_counter a more generic wrapper of std::atomic
so that dict_index_t would support the implicit assignment operator.
It is better to revert the changes to Atomic_counter and
instead introduce Atomic_relaxed as a generic wrapper of std::atomic.
Unlike Atomic_counter, we will not define operator++, operator+=
or similar, because we want the operations to be explicit in the
users of Atomic_relaxed: unlike loads and stores, atomic
read-modify-write operations always incur some overhead.
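
The Atomic_relaxed template itself is added outside storage/innobase, so it does not appear in the diffstat below. As a rough sketch only, assuming nothing beyond what the message above states (implicit relaxed loads and stores, an implicit assignment operator, and explicit fetch_add/fetch_sub with no operator++ or operator+=), such a wrapper could look roughly like this; names and details are illustrative, not the exact MariaDB definition:

```cpp
#include <atomic>

/* Illustrative sketch of a relaxed wrapper of std::atomic, as described
   above; not the exact MariaDB class. */
template <typename T>
class Atomic_relaxed
{
  std::atomic<T> m;
public:
  Atomic_relaxed() = default;
  Atomic_relaxed(T v) : m(v) {}
  Atomic_relaxed(const Atomic_relaxed &rhs)
  { m.store(rhs, std::memory_order_relaxed); }

  /* implicit relaxed load */
  operator T() const { return m.load(std::memory_order_relaxed); }

  /* implicit relaxed store; this is what lets dict_index_t keep its
     implicit assignment operator */
  T operator=(T v) { m.store(v, std::memory_order_relaxed); return v; }
  T operator=(const Atomic_relaxed &rhs) { return *this = T(rhs); }

  /* read-modify-write operations must be spelled out at the call site;
     no operator++ or operator+= is provided */
  T fetch_add(T i) { return m.fetch_add(i, std::memory_order_relaxed); }
  T fetch_sub(T i) { return m.fetch_sub(i, std::memory_order_relaxed); }
};
```

With such a wrapper, call sites like info->pad += ZIP_PAD_INCR in the diff below become the explicit info->pad.fetch_add(ZIP_PAD_INCR), while plain reads and assignments keep their ordinary syntax.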
Diffstat (limited to 'storage/innobase')
-rw-r--r-- | storage/innobase/dict/dict0dict.cc  | 11
-rw-r--r-- | storage/innobase/include/dict0mem.h |  6
-rw-r--r-- | storage/innobase/include/sync0rw.h  |  6
-rw-r--r-- | storage/innobase/include/sync0rw.ic | 11
-rw-r--r-- | storage/innobase/sync/sync0rw.cc    |  4
5 files changed, 15 insertions, 23 deletions
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index c9abc03f682..e04d1dcbbe0 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -6158,10 +6158,7 @@ dict_index_zip_pad_update(
         beyond max pad size. */
         if (info->pad + ZIP_PAD_INCR
             < (srv_page_size * zip_pad_max) / 100) {
-                /* Use atomics even though we have the mutex.
-                This is to ensure that we are able to read
-                info->pad atomically. */
-                info->pad += ZIP_PAD_INCR;
+                info->pad.fetch_add(ZIP_PAD_INCR);

                 MONITOR_INC(MONITOR_PAD_INCREMENTS);
         }
@@ -6178,11 +6175,7 @@ dict_index_zip_pad_update(
                 padding. */
                 if (info->n_rounds >= ZIP_PAD_SUCCESSFUL_ROUND_LIMIT
                     && info->pad > 0) {
-
-                        /* Use atomics even though we have the mutex.
-                        This is to ensure that we are able to read
-                        info->pad atomically. */
-                        info->pad -= ZIP_PAD_INCR;
+                        info->pad.fetch_sub(ZIP_PAD_INCR);

                         info->n_rounds = 0;
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index b2be01ec637..78c723b8a76 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -931,7 +931,7 @@ an uncompressed page should be left as padding to avoid compression
 failures. This estimate is based on a self-adapting heuristic. */
 struct zip_pad_info_t {
         SysMutex        mutex;  /*!< mutex protecting the info */
-        Atomic_counter<ulint>
+        Atomic_relaxed<ulint>
                         pad;    /*!< number of bytes used as pad */
         ulint           success;/*!< successful compression ops during
                                 current round */
@@ -1107,10 +1107,10 @@ struct dict_index_t {
         /* @} */
 private:
   /** R-tree split sequence number */
-  Atomic_counter<node_seq_t> rtr_ssn;
+  Atomic_relaxed<node_seq_t> rtr_ssn;
 public:
   void set_ssn(node_seq_t ssn) { rtr_ssn= ssn; }
-  node_seq_t assign_ssn() { return ++rtr_ssn; }
+  node_seq_t assign_ssn() { return rtr_ssn.fetch_add(1) + 1; }
   node_seq_t ssn() const { return rtr_ssn; }

   rtr_info_track_t*
diff --git a/storage/innobase/include/sync0rw.h b/storage/innobase/include/sync0rw.h
index 6592988def8..48528eb4d30 100644
--- a/storage/innobase/include/sync0rw.h
+++ b/storage/innobase/include/sync0rw.h
@@ -569,10 +569,10 @@ struct rw_lock_t
 #endif /* UNIV_DEBUG */
 {
   /** Holds the state of the lock. */
-  Atomic_counter<int32_t> lock_word;
+  Atomic_relaxed<int32_t> lock_word;

-  /** 1: there are waiters */
-  Atomic_counter<uint32_t> waiters;
+  /** 0=no waiters, 1=waiters for X or SX lock exist */
+  Atomic_relaxed<uint32_t> waiters;

   /** number of granted SX locks. */
   volatile ulint sx_recursive;
diff --git a/storage/innobase/include/sync0rw.ic b/storage/innobase/include/sync0rw.ic
index 2a7b008a532..70723b05944 100644
--- a/storage/innobase/include/sync0rw.ic
+++ b/storage/innobase/include/sync0rw.ic
@@ -355,16 +355,15 @@ rw_lock_s_unlock_func(
         ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_S));

         /* Increment lock_word to indicate 1 less reader */
-        int32_t lock_word = ++lock->lock_word;
+        int32_t lock_word = lock->lock_word.fetch_add(1);

-        if (lock_word == 0 || lock_word == -X_LOCK_HALF_DECR) {
+        if (lock_word == -1 || lock_word == -X_LOCK_HALF_DECR - 1) {
                 /* wait_ex waiter exists. It may not be asleep, but we signal
                 anyway. We do not wake other waiters, because they can't
                 exist without wait_ex waiter and wait_ex waiter goes first.*/
                 os_event_set(lock->wait_ex_event);
                 sync_array_object_signalled();
         } else {
-                ut_ad(--lock_word);
                 ut_ad(lock_word > -X_LOCK_DECR);
                 ut_ad(lock_word < X_LOCK_DECR);
         }
@@ -414,11 +413,11 @@ rw_lock_x_unlock_func(
         } else if (lock_word == -X_LOCK_DECR
                    || lock_word == -(X_LOCK_DECR + X_LOCK_HALF_DECR)) {
                 /* There are 2 x-locks */
-                lock->lock_word += X_LOCK_DECR;
+                lock->lock_word.fetch_add(X_LOCK_DECR);
         } else {
                 /* There are more than 2 x-locks. */
                 ut_ad(lock_word < -X_LOCK_DECR);
-                lock->lock_word++;
+                lock->lock_word.fetch_add(1);
         }

         ut_ad(rw_lock_validate(lock));
@@ -470,7 +469,7 @@ rw_lock_sx_unlock_func(
                         /* still has x-lock */
                         ut_ad(lock_word == -X_LOCK_HALF_DECR
                               || lock_word <= -(X_LOCK_DECR
                                                 + X_LOCK_HALF_DECR));
-                        lock->lock_word += X_LOCK_HALF_DECR;
+                        lock->lock_word.fetch_add(X_LOCK_HALF_DECR);
                 }
         }
diff --git a/storage/innobase/sync/sync0rw.cc b/storage/innobase/sync/sync0rw.cc
index 22bebdb33da..fea94cc05f9 100644
--- a/storage/innobase/sync/sync0rw.cc
+++ b/storage/innobase/sync/sync0rw.cc
@@ -528,10 +528,10 @@ rw_lock_x_lock_low(
                 exists. Add another. */
                 if (lock_word == 0 || lock_word == -X_LOCK_HALF_DECR) {
-                        lock->lock_word -= X_LOCK_DECR;
+                        lock->lock_word.fetch_sub(X_LOCK_DECR);
                 } else {
                         ut_ad(lock_word <= -X_LOCK_DECR);
-                        lock->lock_word--;
+                        lock->lock_word.fetch_sub(1);
                 }
         }
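
One subtlety of replacing the overloaded operators with explicit calls shows up in rw_lock_s_unlock_func() and assign_ssn(): operator++ on the old Atomic_counter yielded the incremented (new) value, whereas std::atomic::fetch_add() returns the value held before the addition. That is why the comparison against 0 became a comparison against -1 (and -X_LOCK_HALF_DECR became -X_LOCK_HALF_DECR - 1), and why assign_ssn() now adds 1 to the result of fetch_add(). A standalone illustration using plain std::atomic (not MariaDB code):

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>

int main()
{
  std::atomic<int32_t> lock_word(-1);

  /* Old code:  int32_t w = ++lock->lock_word;   // w would be 0, the new value
     New code:  fetch_add(1) returns the value *before* the increment. */
  int32_t w = lock_word.fetch_add(1);
  assert(w == -1);          /* hence the test against -1 instead of 0 */
  assert(lock_word == 0);   /* the stored value ends up the same either way */

  std::atomic<uint32_t> ssn(41);
  /* Old:  return ++rtr_ssn;                 // 42
     New:  return rtr_ssn.fetch_add(1) + 1;  // also 42 */
  assert(ssn.fetch_add(1) + 1 == 42);
  return 0;
}
```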