Diffstat:
 storage/innobase/sync/sync0rw.cc | 82 ++++++++++++--------------------
 1 file changed, 32 insertions(+), 50 deletions(-)
diff --git a/storage/innobase/sync/sync0rw.cc b/storage/innobase/sync/sync0rw.cc
index 658a0f906a7..d05b9475b34 100644
--- a/storage/innobase/sync/sync0rw.cc
+++ b/storage/innobase/sync/sync0rw.cc
@@ -205,8 +205,8 @@ rw_lock_create_func(
/* If this is the very first time a synchronization object is
created, then the following call initializes the sync system. */
- lock->lock_word = X_LOCK_DECR;
- lock->waiters = 0;
+ lock->lock_word.store(X_LOCK_DECR, std::memory_order_relaxed);
+ lock->waiters.store(0, std::memory_order_relaxed);
lock->sx_recursive = 0;
lock->writer_thread= 0;
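
Here the new std::atomic members are initialized with plain relaxed stores. That is safe because a lock under construction is not yet visible to any other thread; it becomes shared only once the next hunk links it into rw_lock_list under rw_lock_list_mutex. A minimal sketch of the idiom (names hypothetical):

    #include <atomic>
    #include <cstdint>

    struct latch_t {
        std::atomic<int32_t> lock_word;
        std::atomic<int32_t> waiters;
    };

    void latch_init(latch_t* l, int32_t unlocked_value)
    {
        // No other thread can observe the latch before it is published
        // (e.g. linked into a mutex-protected list), so relaxed stores
        // are sufficient for initialization.
        l->lock_word.store(unlocked_value, std::memory_order_relaxed);
        l->waiters.store(0, std::memory_order_relaxed);
    }
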
@@ -238,9 +238,7 @@ rw_lock_create_func(
lock->is_block_lock = 0;
mutex_enter(&rw_lock_list_mutex);
-
UT_LIST_ADD_FIRST(rw_lock_list, lock);
-
mutex_exit(&rw_lock_list_mutex);
}
@@ -254,8 +252,7 @@ rw_lock_free_func(
rw_lock_t* lock) /*!< in/out: rw-lock */
{
ut_ad(rw_lock_validate(lock));
- ut_a(my_atomic_load32_explicit(&lock->lock_word,
- MY_MEMORY_ORDER_RELAXED) == X_LOCK_DECR);
+ ut_a(lock->lock_word.load(std::memory_order_relaxed) == X_LOCK_DECR);
mutex_enter(&rw_lock_list_mutex);
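
The free-time assertion only makes sense against the lock_word encoding that the rest of this file checks. A rough reconstruction of that encoding, with illustrative constants (the real values come from the sync0rw headers):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Illustrative values; InnoDB defines the real constants elsewhere.
    static const int32_t X_LOCK_DECR      = 0x20000000;
    static const int32_t X_LOCK_HALF_DECR = X_LOCK_DECR / 2;

    // Rough state map, reconstructed from the assertions in this file:
    //   lock_word == X_LOCK_DECR        -> fully unlocked
    //   0 < lock_word < X_LOCK_DECR     -> S-locked by
    //                                      (X_LOCK_DECR - lock_word) readers
    //   lock_word == 0                  -> X-locked once
    //   lock_word == -X_LOCK_HALF_DECR  -> X-locked once, SX also held
    //   lock_word <= -X_LOCK_DECR       -> X-locked recursively
    void assert_fully_unlocked(const std::atomic<int32_t>& lock_word)
    {
        // relaxed is enough: whoever frees the latch must already have
        // excluded every other user of it.
        assert(lock_word.load(std::memory_order_relaxed) == X_LOCK_DECR);
    }
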
@@ -297,8 +294,7 @@ lock_loop:
/* Spin waiting for the writer field to become free */
HMT_low();
while (i < srv_n_spin_wait_rounds &&
- my_atomic_load32_explicit(&lock->lock_word,
- MY_MEMORY_ORDER_RELAXED) <= 0) {
+ lock->lock_word.load(std::memory_order_relaxed) <= 0) {
ut_delay(srv_spin_wait_delay);
i++;
}
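
The spin loop polls lock_word with a relaxed load. No ordering is needed while merely watching the word: the acquire that matters is performed by the compare-and-swap inside rw_lock_s_lock_low() when the lock is actually taken. The pattern in isolation (tuning names hypothetical):

    #include <atomic>
    #include <cstdint>

    static const unsigned SPIN_ROUNDS = 30; // stands in for srv_n_spin_wait_rounds

    void cpu_relax() { /* stands in for ut_delay(srv_spin_wait_delay) */ }

    // Poll until the word goes positive (S locks admitted) or the spin
    // budget runs out. The relaxed load is only a hint; correctness
    // comes from the acquire CAS in the real lock attempt that follows.
    void spin_for_s_lock(const std::atomic<int32_t>& lock_word)
    {
        unsigned i = 0;
        while (i < SPIN_ROUNDS
               && lock_word.load(std::memory_order_relaxed) <= 0) {
            cpu_relax();
            i++;
        }
    }
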
@@ -338,7 +334,7 @@ lock_loop:
/* Set waiters before checking lock_word to ensure wake-up
signal is sent. This may lead to some unnecessary signals. */
- my_atomic_fas32_explicit(&lock->waiters, 1, MY_MEMORY_ORDER_ACQUIRE);
+ lock->waiters.exchange(1, std::memory_order_acquire);
if (rw_lock_s_lock_low(lock, pass, file_name, line)) {
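
exchange() with acquire ordering is doing double duty here: it publishes waiters = 1 before the final lock attempt, which is the classic defence against a lost wake-up. One plausible rendering of both halves of that handshake, with toy lock semantics and placeholder wait/signal primitives (the release side is not part of this hunk):

    #include <atomic>
    #include <cstdint>

    std::atomic<int32_t> lock_word{1};  // toy: > 0 means an S lock is free
    std::atomic<int32_t> waiters{0};

    void os_wait()   { /* placeholder: block in the sync array */ }
    void os_signal() { /* placeholder: wake sync-array sleepers */ }

    bool try_s_lock()
    {
        int32_t w = lock_word.load(std::memory_order_relaxed);
        return w > 0
            && lock_word.compare_exchange_strong(
                   w, w - 1, std::memory_order_acquire);
    }

    void s_lock_slow_path()
    {
        // Announce the intent to sleep BEFORE the last lock attempt, so
        // a releaser cannot read waiters == 0 and skip its signal while
        // we are still on the way to sleep. May cause spurious signals.
        waiters.exchange(1, std::memory_order_acquire);
        if (!try_s_lock()) {
            os_wait();
        }
    }

    void s_unlock()
    {
        lock_word.fetch_add(1, std::memory_order_release);
        // Clear-and-test after publishing the release: at least one
        // side of the handshake always observes the other.
        if (waiters.exchange(0, std::memory_order_acquire)) {
            os_signal();
        }
    }
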
@@ -416,10 +412,10 @@ rw_lock_x_lock_wait_func(
sync_array_t* sync_arr;
int64_t count_os_wait = 0;
- ut_ad(my_atomic_load32_explicit(&lock->lock_word, MY_MEMORY_ORDER_RELAXED) <= threshold);
+ ut_ad(lock->lock_word.load(std::memory_order_relaxed) <= threshold);
HMT_low();
- while (my_atomic_load32_explicit(&lock->lock_word, MY_MEMORY_ORDER_RELAXED) < threshold) {
+ while (lock->lock_word.load(std::memory_order_relaxed) < threshold) {
ut_delay(srv_spin_wait_delay);
if (i < srv_n_spin_wait_rounds) {
@@ -438,7 +434,7 @@ rw_lock_x_lock_wait_func(
i = 0;
/* Check lock_word to ensure wake-up isn't missed.*/
- if (my_atomic_load32_explicit(&lock->lock_word, MY_MEMORY_ORDER_RELAXED) < threshold) {
+ if (lock->lock_word.load(std::memory_order_relaxed) < threshold) {
++count_os_wait;
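
After reserving a wait cell, the function reads lock_word once more and only then sleeps; a release that slipped in between the spin and the reservation is thereby never missed. Condensed, with placeholder sync-array calls:

    #include <atomic>
    #include <cstdint>

    struct sync_cell_t {};                 // placeholder wait cell
    sync_cell_t* sync_array_reserve() { static sync_cell_t c; return &c; }
    void sync_array_wait(sync_cell_t*) { /* placeholder: sleep */ }
    void sync_array_free(sync_cell_t*) { /* placeholder: release cell */ }

    // Wait until enough S locks drain that lock_word reaches `threshold`.
    void wait_for_readers(const std::atomic<int32_t>& lock_word,
                          int32_t threshold)
    {
        while (lock_word.load(std::memory_order_relaxed) < threshold) {
            sync_cell_t* cell = sync_array_reserve();
            // Re-check after reserving the cell, exactly so that a
            // release landing between the loop test and the reservation
            // cannot leave us sleeping with no one left to signal.
            if (lock_word.load(std::memory_order_relaxed) < threshold) {
                sync_array_wait(cell);
            } else {
                sync_array_free(cell);
            }
        }
    }
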
@@ -528,18 +524,17 @@ rw_lock_x_lock_low(
file_name, line);
} else {
- int32_t lock_word = my_atomic_load32_explicit(&lock->lock_word,
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
/* At least one X lock by this thread already
exists. Add another. */
if (lock_word == 0
|| lock_word == -X_LOCK_HALF_DECR) {
- my_atomic_add32_explicit(&lock->lock_word, -X_LOCK_DECR,
- MY_MEMORY_ORDER_RELAXED);
+ lock->lock_word.fetch_sub(X_LOCK_DECR,
+ std::memory_order_relaxed);
} else {
ut_ad(lock_word <= -X_LOCK_DECR);
- my_atomic_add32_explicit(&lock->lock_word, -1,
- MY_MEMORY_ORDER_RELAXED);
+ lock->lock_word.fetch_sub(1,
+ std::memory_order_relaxed);
}
}
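
Only the thread that already owns the X lock can reach this branch, which is why relaxed ordering suffices for the bookkeeping. The counting scheme: the first re-entrant request drops the word by a full X_LOCK_DECR, every later one by a single unit. In isolation (constants illustrative):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    static const int32_t X_LOCK_DECR      = 0x20000000; // illustrative
    static const int32_t X_LOCK_HALF_DECR = X_LOCK_DECR / 2;

    // Called only by the thread that already holds the X lock, so no
    // other thread is mutating lock_word in a conflicting way.
    void x_lock_recursive(std::atomic<int32_t>& lock_word)
    {
        int32_t w = lock_word.load(std::memory_order_relaxed);
        if (w == 0 || w == -X_LOCK_HALF_DECR) {
            // First recursion: move from "X once" to "X twice".
            lock_word.fetch_sub(X_LOCK_DECR, std::memory_order_relaxed);
        } else {
            assert(w <= -X_LOCK_DECR);
            // Third and later recursions cost one unit each.
            lock_word.fetch_sub(1, std::memory_order_relaxed);
        }
    }
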
@@ -611,10 +606,10 @@ rw_lock_sx_lock_low(
read and write to the lock_word. */
#ifdef UNIV_DEBUG
- int32_t lock_word =
+ auto lock_word =
#endif
- my_atomic_add32_explicit(&lock->lock_word, -X_LOCK_HALF_DECR,
- MY_MEMORY_ORDER_RELAXED);
+ lock->lock_word.fetch_sub(X_LOCK_HALF_DECR,
+ std::memory_order_relaxed);
ut_ad((lock_word == 0)
|| ((lock_word <= -X_LOCK_DECR)
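
One detail worth pinning down in this conversion: std::atomic::fetch_sub() returns the value held before the subtraction, the same fetch-and-modify convention as my_atomic_add32_explicit(), so the UNIV_DEBUG assertion still sees the pre-decrement state. A two-assert demonstration:

    #include <atomic>
    #include <cassert>

    int main()
    {
        std::atomic<int> v{10};
        // fetch_sub returns the OLD value (fetch-and-modify), matching
        // the my_atomic_add32_explicit() convention the patch replaces.
        assert(v.fetch_sub(3, std::memory_order_relaxed) == 10);
        assert(v.load(std::memory_order_relaxed) == 7);
        return 0;
    }
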
@@ -682,7 +677,7 @@ lock_loop:
/* Spin waiting for the lock_word to become free */
HMT_low();
while (i < srv_n_spin_wait_rounds
- && my_atomic_load32_explicit(&lock->lock_word, MY_MEMORY_ORDER_RELAXED) <= X_LOCK_HALF_DECR) {
+ && lock->lock_word.load(std::memory_order_relaxed) <= X_LOCK_HALF_DECR) {
ut_delay(srv_spin_wait_delay);
i++;
}
@@ -707,7 +702,7 @@ lock_loop:
/* Waiters must be set before checking lock_word, to ensure signal
is sent. This could lead to a few unnecessary wake-up signals. */
- my_atomic_fas32_explicit(&lock->waiters, 1, MY_MEMORY_ORDER_ACQUIRE);
+ lock->waiters.exchange(1, std::memory_order_acquire);
if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
sync_array_free_cell(sync_arr, cell);
@@ -783,7 +778,7 @@ lock_loop:
/* Spin waiting for the lock_word to become free */
while (i < srv_n_spin_wait_rounds
- && my_atomic_load32_explicit(&lock->lock_word, MY_MEMORY_ORDER_RELAXED) <= X_LOCK_HALF_DECR) {
+ && lock->lock_word.load(std::memory_order_relaxed) <= X_LOCK_HALF_DECR) {
ut_delay(srv_spin_wait_delay);
i++;
}
@@ -807,7 +802,7 @@ lock_loop:
/* Waiters must be set before checking lock_word, to ensure signal
is sent. This could lead to a few unnecessary wake-up signals. */
- my_atomic_fas32_explicit(&lock->waiters, 1, MY_MEMORY_ORDER_ACQUIRE);
+ lock->waiters.exchange(1, std::memory_order_acquire);
if (rw_lock_sx_lock_low(lock, pass, file_name, line)) {
@@ -850,11 +845,9 @@ rw_lock_validate(
ut_ad(lock);
- lock_word = my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word),
- MY_MEMORY_ORDER_RELAXED);
+ lock_word = lock->lock_word.load(std::memory_order_relaxed);
- ut_ad(my_atomic_load32_explicit(const_cast<int32_t*>(&lock->waiters),
- MY_MEMORY_ORDER_RELAXED) < 2);
+ ut_ad(lock->waiters.load(std::memory_order_relaxed) < 2);
ut_ad(lock_word > -(2 * X_LOCK_DECR));
ut_ad(lock_word <= X_LOCK_DECR);
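
A side benefit of std::atomic visible in this hunk: load() is a const member function, so the const_cast contortions the old my_atomic_load32_explicit() calls needed on a const lock disappear. Illustrated:

    #include <atomic>
    #include <cstdint>

    static const int32_t X_LOCK_DECR = 0x20000000; // illustrative

    struct latch_t {
        std::atomic<int32_t> lock_word{X_LOCK_DECR};
        std::atomic<int32_t> waiters{0};
    };

    // A const reference now works without const_cast, because
    // std::atomic<T>::load() is declared const.
    bool latch_validate(const latch_t& l)
    {
        const int32_t w = l.lock_word.load(std::memory_order_relaxed);
        return l.waiters.load(std::memory_order_relaxed) < 2
            && w > -(2 * X_LOCK_DECR)
            && w <= X_LOCK_DECR;
    }
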
@@ -917,8 +910,7 @@ rw_lock_add_debug_info(
rw_lock_debug_mutex_exit();
if (pass == 0 && lock_type != RW_LOCK_X_WAIT) {
- int32_t lock_word = my_atomic_load32_explicit(&lock->lock_word,
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
/* Recursive x while holding SX
(lock_type == RW_LOCK_X && lock_word == -X_LOCK_HALF_DECR)
@@ -1084,11 +1076,11 @@ rw_lock_list_print_info(
count++;
- if (my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word), MY_MEMORY_ORDER_RELAXED) != X_LOCK_DECR) {
+ if (lock->lock_word.load(std::memory_order_relaxed) != X_LOCK_DECR) {
fprintf(file, "RW-LOCK: %p ", (void*) lock);
- if (int32_t waiters= my_atomic_load32_explicit(const_cast<int32_t*>(&lock->waiters), MY_MEMORY_ORDER_RELAXED)) {
+ if (int32_t waiters= lock->waiters.load(std::memory_order_relaxed)) {
fprintf(file, " (%d waiters)\n", waiters);
} else {
putc('\n', file);
@@ -1152,10 +1144,10 @@ rw_lock_debug_print(
fprintf(f, "\n");
}
-/** Print where it was locked from
+/** Print the rw-lock information.
@return the string representation */
std::string
-rw_lock_t::locked_from() const
+rw_lock_t::to_string() const
{
/* Note: For X locks it can be locked from multiple places because
the same thread can call X lock recursively. */
@@ -1165,6 +1157,11 @@ rw_lock_t::locked_from() const
ut_ad(rw_lock_validate(this));
+ msg << "RW-LATCH: "
+ << "thread id " << os_thread_pf(os_thread_get_curr_id())
+ << " addr: " << this
+ << " Locked from: ";
+
rw_lock_debug_mutex_enter();
for (rw_lock_debug_t* info = UT_LIST_GET_FIRST(debug_list);
@@ -1187,19 +1184,4 @@ rw_lock_t::locked_from() const
return(msg.str());
}
-
-/** Print the rw-lock information.
-@return the string representation */
-std::string
-rw_lock_t::to_string() const
-{
- std::ostringstream msg;
-
- msg << "RW-LATCH: "
- << "thread id " << os_thread_pf(os_thread_get_curr_id())
- << " addr: " << this
- << " Locked from: " << locked_from().c_str();
-
- return(msg.str());
-}
#endif /* UNIV_DEBUG */
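
With the old to_string() body folded into what used to be locked_from(), there is a single diagnostic entry point and one less intermediate std::string. A hypothetical call site, assuming a UNIV_DEBUG build:

    #include <cstdio>
    #include <string>

    // Hypothetical helper; rw_lock_t and the merged to_string() come
    // from sync0rw in this patch.
    struct rw_lock_t { std::string to_string() const; };

    void report_latch(FILE* f, const rw_lock_t* lock)
    {
        // One call now yields the full "RW-LATCH: ... Locked from: ..."
        // message; previously to_string() spliced in locked_from().
        fprintf(f, "%s\n", lock->to_string().c_str());
    }
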