author    Marko Mäkelä <marko.makela@mariadb.com>    2022-02-20 16:51:23 +0200
committer Marko Mäkelä <marko.makela@mariadb.com>    2022-02-20 16:51:23 +0200
commit    02f1dbff2721e9088ed7df38cd7a5e2a270719f2 (patch)
tree      5e7b859b4cead68fecf1df05a6f4bdfd37e4b700
parent    1c5b099a9619c953e7510bbafca89353ad0a020c (diff)
download  mariadb-git-bb-10.8-mcs_lock.tar.gz

WIP: Implement mcs_lock (bb-10.8-mcs_lock)
-rw-r--r--  storage/innobase/include/log0log.h   |  8
-rw-r--r--  storage/innobase/include/srw_lock.h  | 13
-rw-r--r--  storage/innobase/log/log0log.cc      |  2
-rw-r--r--  storage/innobase/mtr/mtr0mtr.cc      | 17
-rw-r--r--  storage/innobase/sync/srw_lock.cc    | 33
5 files changed, 61 insertions(+), 12 deletions(-)
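
This WIP patch replaces the srw_mutex spin lock that serializes updates of log_sys.lsn and buf_free with an MCS (Mellor-Crummey/Scott) queue lock: each waiter enqueues a node of its own and spins on a flag inside that node, so releasing the lock touches only the successor's cache line. For contrast, a naive test-and-set spin lock (hypothetical code, not part of this patch) makes every waiter spin on one shared flag:

#include <atomic>

// Hypothetical test-and-set lock, shown only for contrast: all waiters
// spin on the same `locked` word, so every release bounces that cache
// line across all waiting CPUs. The MCS lock in this patch avoids that.
struct tas_lock
{
  std::atomic<bool> locked{false};
  void lock() noexcept
  {
    while (locked.exchange(true, std::memory_order_acquire))
      while (locked.load(std::memory_order_relaxed)) {}
  }
  void unlock() noexcept { locked.store(false, std::memory_order_release); }
};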
diff --git a/storage/innobase/include/log0log.h b/storage/innobase/include/log0log.h
index eb7c37f4699..904c72921e3 100644
--- a/storage/innobase/include/log0log.h
+++ b/storage/innobase/include/log0log.h
@@ -214,7 +214,7 @@ public:
private:
/** spin lock protecting lsn, buf_free in append_prepare() */
- MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) srw_mutex lsn_lock;
+ MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) mcspin_lock lsn_lock;
public:
/** first free offset within buf use; protected by lsn_lock */
Atomic_relaxed<size_t> buf_free;
@@ -365,8 +365,10 @@ public:
private:
/** Wait in append_prepare() for buffer to become available
- @param ex whether log_sys.latch is exclusively locked */
- ATTRIBUTE_COLD static void append_prepare_wait(bool ex) noexcept;
+ @param ex whether log_sys.latch is exclusively locked
+ @param q lsn_lock queue position */
+ ATTRIBUTE_COLD
+ static void append_prepare_wait(bool ex, mcspin_lock::queue *q) noexcept;
public:
/** Reserve space in the log buffer for appending data.
@tparam pmem log_sys.is_pmem()
diff --git a/storage/innobase/include/srw_lock.h b/storage/innobase/include/srw_lock.h
index f3c7456b701..01d8fcf60e6 100644
--- a/storage/innobase/include/srw_lock.h
+++ b/storage/innobase/include/srw_lock.h
@@ -520,3 +520,16 @@ typedef srw_lock_impl<false> srw_lock;
typedef srw_lock_impl<true> srw_spin_lock;
#endif
+
+/** Simple spin lock */
+struct mcspin_lock
+{
+ struct queue
+ {
+ std::atomic<queue*> next{nullptr};
+ std::atomic<bool> held{false};
+ };
+ std::atomic<queue*> state{nullptr};
+ void lock(queue *own) noexcept;
+ void unlock(queue *own) noexcept;
+};
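
A sketch of the intended calling convention, assuming only the interface above: the caller supplies a queue node, typically on its own stack, and must pass the same node to unlock(). Because lock() re-initializes the node's fields, one node can be reused across acquisitions.

// Usage sketch (hypothetical caller; `latch` and `counter` are made up).
static mcspin_lock latch;
static size_t counter;

void critical_update() noexcept
{
  mcspin_lock::queue q;   // per-thread node; waiters spin on q.held
  latch.lock(&q);
  ++counter;              // the protected section
  latch.unlock(&q);       // must get the same node that lock() received
}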
diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc
index e15fa0b241d..6c930db4a6c 100644
--- a/storage/innobase/log/log0log.cc
+++ b/storage/innobase/log/log0log.cc
@@ -102,7 +102,6 @@ void log_t::create()
ut_ad(!is_initialised());
latch.SRW_LOCK_INIT(log_latch_key);
- lsn_lock.init();
/* LSN 0 and 1 are reserved; @see buf_page_t::oldest_modification_ */
lsn.store(FIRST_LSN, std::memory_order_relaxed);
@@ -1081,7 +1080,6 @@ void log_t::close()
#endif
latch.destroy();
- lsn_lock.destroy();
recv_sys.close();
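
The init() and destroy() calls can simply disappear because, judging by the definition in srw_lock.h above, mcspin_lock consists only of std::atomic members with default member initializers: it is ready for use right after construction and holds no OS resources. A minimal illustration of that assumption:

#include <type_traits>

// mcspin_lock needs no init()/destroy(): construction leaves state ==
// nullptr (unlocked) and the destructor has nothing to release.
static_assert(std::is_trivially_destructible<mcspin_lock>::value,
              "no destroy() call needed");
mcspin_lock example_latch;   // immediately usable, no init() required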
diff --git a/storage/innobase/mtr/mtr0mtr.cc b/storage/innobase/mtr/mtr0mtr.cc
index 66d0fcc8967..075196741ae 100644
--- a/storage/innobase/mtr/mtr0mtr.cc
+++ b/storage/innobase/mtr/mtr0mtr.cc
@@ -789,11 +789,13 @@ ATTRIBUTE_COLD static void log_overwrite_warning(lsn_t age, lsn_t capacity)
}
/** Wait in append_prepare() for buffer to become available
-@param ex whether log_sys.latch is exclusively locked */
-ATTRIBUTE_COLD void log_t::append_prepare_wait(bool ex) noexcept
+@param ex whether log_sys.latch is exclusively locked
+@param q lsn_lock queue position */
+ATTRIBUTE_COLD
+void log_t::append_prepare_wait(bool ex, mcspin_lock::queue *q) noexcept
{
log_sys.waits++;
- log_sys.lsn_lock.wr_unlock();
+ log_sys.lsn_lock.unlock(q);
if (ex)
log_sys.latch.wr_unlock();
@@ -808,7 +810,7 @@ ATTRIBUTE_COLD void log_t::append_prepare_wait(bool ex) noexcept
else
log_sys.latch.rd_lock(SRW_LOCK_CALL);
- log_sys.lsn_lock.wr_lock();
+ log_sys.lsn_lock.lock(q);
}
/** Reserve space in the log buffer for appending data.
@@ -829,7 +831,8 @@ std::pair<lsn_t,byte*> log_t::append_prepare(size_t size, bool ex) noexcept
ut_ad(pmem == is_pmem());
const lsn_t checkpoint_margin{last_checkpoint_lsn + log_capacity - size};
const size_t avail{(pmem ? size_t(capacity()) : buf_size) - size};
- lsn_lock.wr_lock();
+ mcspin_lock::queue q;
+ lsn_lock.lock(&q);
write_to_buf++;
for (ut_d(int count= 50);
@@ -838,7 +841,7 @@ std::pair<lsn_t,byte*> log_t::append_prepare(size_t size, bool ex) noexcept
get_flushed_lsn(std::memory_order_relaxed))
: size_t{buf_free}) > avail); )
{
- append_prepare_wait(ex);
+ append_prepare_wait(ex, &q);
ut_ad(count--);
}
@@ -850,7 +853,7 @@ std::pair<lsn_t,byte*> log_t::append_prepare(size_t size, bool ex) noexcept
if (pmem && new_buf_free >= file_size)
new_buf_free-= size_t(capacity());
buf_free= new_buf_free;
- lsn_lock.wr_unlock();
+ lsn_lock.unlock(&q);
if (UNIV_UNLIKELY(l > checkpoint_margin) ||
(!pmem && b >= max_buf_free))
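
append_prepare_wait() has to drop lsn_lock while it waits for the log to be written or flushed, and then take the lock again; the queue node is threaded through as a parameter so that both steps operate on the caller's node. Since lock() resets q->next and q->held, handing the same node back in is safe. The generic shape of this pattern, as a sketch (condition() and the waiting step are hypothetical stand-ins):

void wait_until(mcspin_lock &latch, bool (*condition)()) noexcept
{
  mcspin_lock::queue q;
  latch.lock(&q);
  while (!condition())
  {
    latch.unlock(&q);  // release so that others can make progress
    // ... wait for the condition to change ...
    latch.lock(&q);    // reuse the node; lock() re-initializes it
  }
  latch.unlock(&q);
}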
diff --git a/storage/innobase/sync/srw_lock.cc b/storage/innobase/sync/srw_lock.cc
index b0e2bf17e08..23476be3b5a 100644
--- a/storage/innobase/sync/srw_lock.cc
+++ b/storage/innobase/sync/srw_lock.cc
@@ -693,3 +693,36 @@ template void ssux_lock_impl<false>::u_unlock();
template void ssux_lock_impl<false>::wr_unlock();
# endif
#endif /* UNIV_PFS_RWLOCK */
+
+void mcspin_lock::lock(mcspin_lock::queue *q) noexcept
+{
+ q->next.store(nullptr, std::memory_order_relaxed);
+ q->held.store(false, std::memory_order_relaxed);
+
+ if (queue* prev= state.exchange(q, std::memory_order_acquire))
+ {
+ ut_ad(prev != q);
+ prev->next.store(q, std::memory_order_relaxed);
+ while (!q->held.load(std::memory_order_acquire))
+ srw_pause(1);
+ }
+}
+
+void mcspin_lock::unlock(mcspin_lock::queue *q) noexcept
+{
+ queue *next= q->next.load(std::memory_order_relaxed);
+
+ if (!next)
+ {
+ queue *qq{q};
+ if (state.compare_exchange_strong(qq, nullptr, std::memory_order_release,
+ std::memory_order_relaxed))
+ return;
+
+ while (!(next= q->next.load(std::memory_order_relaxed)))
+ srw_pause(1);
+ }
+
+ ut_ad(next != q);
+ next->held.store(true, std::memory_order_release);
+}
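
For reference, here is a self-contained sketch of the same MCS queue-lock algorithm that compiles outside InnoDB: srw_pause() and ut_ad() are replaced by portable stand-ins, and the next-pointer handoff uses release/acquire ordering as a conservative choice (the patch itself uses relaxed stores on that edge). Acquisition exchanges the tail pointer; an unlocker with no visible successor tries to CAS the tail back to nullptr, and otherwise waits for the successor to link itself before handing over:

#include <atomic>
#include <cassert>
#include <thread>

struct mcs_lock
{
  struct node
  {
    std::atomic<node*> next{nullptr};
    std::atomic<bool> held{false};
  };
  std::atomic<node*> tail{nullptr};

  void lock(node *q) noexcept
  {
    q->next.store(nullptr, std::memory_order_relaxed);
    q->held.store(false, std::memory_order_relaxed);
    // Become the new tail; acq_rel pairs with the unlocker's CAS.
    if (node *prev= tail.exchange(q, std::memory_order_acq_rel))
    {
      prev->next.store(q, std::memory_order_release); // link behind prev
      while (!q->held.load(std::memory_order_acquire))
        std::this_thread::yield();                    // spin on own node
    }
  }

  void unlock(node *q) noexcept
  {
    node *next= q->next.load(std::memory_order_acquire);
    if (!next)
    {
      node *expected= q;
      // No successor is visible: try to mark the lock as free.
      if (tail.compare_exchange_strong(expected, nullptr,
                                       std::memory_order_release,
                                       std::memory_order_relaxed))
        return;
      // Someone enqueued behind us but has not linked itself yet.
      while (!(next= q->next.load(std::memory_order_acquire)))
        std::this_thread::yield();
    }
    next->held.store(true, std::memory_order_release); // hand over the lock
  }
};

int main()
{
  mcs_lock latch;
  unsigned counter= 0;
  auto work= [&latch, &counter] {
    for (int i= 0; i < 100000; i++)
    {
      mcs_lock::node q;   // node lives on this thread's stack
      latch.lock(&q);
      ++counter;          // serialized by the lock
      latch.unlock(&q);
    }
  };
  std::thread a(work), b(work);
  a.join();
  b.join();
  assert(counter == 200000);
  return 0;
}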