From ec619a1def69c175d9fa07201b2567c3478e9408 Mon Sep 17 00:00:00 2001
From: Marko Mäkelä
Date: Sat, 2 Oct 2021 09:25:40 +0300
Subject: MDEV-26467 fixup: Prefer fetch_add() to fetch_or() on IA-32 and AMD64

---
 storage/innobase/include/srw_lock.h | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/storage/innobase/include/srw_lock.h b/storage/innobase/include/srw_lock.h
index 3d0d82b81df..9e2eac15df0 100644
--- a/storage/innobase/include/srw_lock.h
+++ b/storage/innobase/include/srw_lock.h
@@ -226,8 +226,18 @@ public:
   void wr_lock()
   {
     writer.wr_lock();
+#if defined __i386__||defined __x86_64__||defined _M_IX86||defined _M_X64
+    /* On IA-32 and AMD64, this type of fetch_or() can only be implemented
+    as a loop around LOCK CMPXCHG. In this particular case, setting the
+    most significant bit using fetch_add() is equivalent, and is
+    translated into a simple LOCK XADD. */
+    static_assert(WRITER == 1U << 31, "compatibility");
+    if (uint32_t lk= readers.fetch_add(WRITER, std::memory_order_acquire))
+      wr_wait(lk);
+#else
     if (uint32_t lk= readers.fetch_or(WRITER, std::memory_order_acquire))
       wr_wait(lk);
+#endif
   }
 
   void u_wr_upgrade()
-- 
cgit v1.2.1
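
A standalone sketch (not part of the patch, and not MariaDB code) of the
equivalence that the comment in the hunk relies on: when bit 31 is known to
be clear, adding 1U << 31 sets exactly that bit, just like ORing it in, but a
fetch_add() that returns the previous value maps to a single LOCK XADD on
IA-32/AMD64, whereas an equivalent fetch_or() needs a LOCK CMPXCHG retry
loop. The names readers and WRITER mirror srw_lock.h; the outer writer mutex
that guarantees the bit is clear is omitted here.

// Illustration only: why fetch_add(WRITER) may replace fetch_or(WRITER)
// when the caller guarantees that the WRITER bit is not already set.
#include <atomic>
#include <cassert>
#include <cstdint>

static constexpr uint32_t WRITER= 1U << 31;
static std::atomic<uint32_t> readers{0};

// fetch_or() that uses the previous value: on IA-32/AMD64 compilers emit a
// compare-and-swap (LOCK CMPXCHG) loop, because no single instruction both
// ORs and returns the old value.
uint32_t acquire_with_or()
{
  return readers.fetch_or(WRITER, std::memory_order_acquire);
}

// fetch_add() variant: compiles to one LOCK XADD. Correct only because the
// caller guarantees WRITER is clear, so the addition cannot carry into the
// reader-count bits.
uint32_t acquire_with_add()
{
  return readers.fetch_add(WRITER, std::memory_order_acquire);
}

int main()
{
  readers.store(3);                  // pretend 3 readers hold the lock
  uint32_t lk= acquire_with_add();
  assert(lk == 3);                   // previous value: WRITER was clear
  assert(readers.load() == (3 | WRITER));

  readers.fetch_and(~WRITER);        // release the writer bit again
  lk= acquire_with_or();             // the fetch_or() form yields the same state
  assert(lk == 3);
  assert(readers.load() == (3 | WRITER));
}

Compiling the two functions at -O2 for x86-64 should show the code-generation
difference the patch comment describes: a cmpxchg loop for acquire_with_or()
and a single lock xadd for acquire_with_add().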