summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMarko Mäkelä <marko.makela@mariadb.com>2020-11-27 20:28:53 +0200
committerMarko Mäkelä <marko.makela@mariadb.com>2020-11-27 20:28:53 +0200
commit8caebfed49930fe2f18558b8b1f0af1be0117853 (patch)
treea710c26913c24e1710b59f648fe408b9d38b18c7
parent81b4a160385ac5398ca8f2a79519da64282d1249 (diff)
downloadmariadb-git-bb-10.6-MDEV-24142.tar.gz
srw_lock, rw_lock: Implement update modebb-10.6-MDEV-24142
FIXME: Pass the line number information to performance_schema. FIXME: Remove PSI_RWLOCK_SHAREDLOCK and friends; just use PSI_RWLOCK_READLOCK and friends.
-rw-r--r--storage/innobase/include/rw_lock.h68
-rw-r--r--storage/innobase/include/srw_lock.h73
-rw-r--r--storage/innobase/include/sux_lock.h153
-rw-r--r--storage/innobase/sync/srw_lock.cc63
4 files changed, 251 insertions, 106 deletions
diff --git a/storage/innobase/include/rw_lock.h b/storage/innobase/include/rw_lock.h
index 6277d952e7f..ac01c28f346 100644
--- a/storage/innobase/include/rw_lock.h
+++ b/storage/innobase/include/rw_lock.h
@@ -20,7 +20,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include <atomic>
#include "my_dbug.h"
-/** Simple read-write lock based on std::atomic */
+/** Simple read-update-write lock based on std::atomic */
class rw_lock
{
/** The lock word */
@@ -35,6 +35,8 @@ protected:
static constexpr uint32_t WRITER_WAITING= 1U << 30;
/** Flag to indicate that write_lock() or write_lock_wait() is pending */
static constexpr uint32_t WRITER_PENDING= WRITER | WRITER_WAITING;
+ /** Flag to indicate that an update lock exists */
+ static constexpr uint32_t UPDATER= 1U << 29;
/** Start waiting for an exclusive lock.
@return current value of the lock word */
@@ -45,13 +47,14 @@ protected:
@return whether the exclusive lock was acquired */
bool write_lock_wait_try(uint32_t &l)
{
- l= WRITER_WAITING;
return lock.compare_exchange_strong(l, WRITER, std::memory_order_acquire,
std::memory_order_relaxed);
}
/** Try to acquire a shared lock.
+ @tparam prioritize_updater whether to ignore WRITER_WAITING for UPDATER
@param l the value of the lock word
@return whether the lock was acquired */
+ template<bool prioritize_updater= false>
bool read_trylock(uint32_t &l)
{
l= UNLOCKED;
@@ -59,16 +62,53 @@ protected:
std::memory_order_relaxed))
{
DBUG_ASSERT(!(WRITER & l) || !(~WRITER_PENDING & l));
- if (l & WRITER_PENDING)
+ DBUG_ASSERT((~(WRITER_PENDING | UPDATER) & l) < UPDATER);
+ if (prioritize_updater
+ ? (WRITER & l) || ((WRITER_WAITING | UPDATER) & l) == WRITER_WAITING
+ : (WRITER_PENDING & l))
return false;
}
return true;
}
+ /** Try to acquire an update lock.
+ @param l the value of the lock word
+ @return whether the lock was acquired */
+ bool update_trylock(uint32_t &l)
+ {
+ l= UNLOCKED;
+ while (!lock.compare_exchange_strong(l, l | UPDATER,
+ std::memory_order_acquire,
+ std::memory_order_relaxed))
+ {
+ DBUG_ASSERT(!(WRITER & l) || !(~WRITER_PENDING & l));
+ DBUG_ASSERT((~(WRITER_PENDING | UPDATER) & l) < UPDATER);
+ if ((WRITER_PENDING | UPDATER) & l)
+ return false;
+ }
+ return true;
+ }
+ /** Try to upgrade an update lock to an exclusive lock.
+ @return whether the update lock was upgraded to exclusive */
+ bool upgrade_trylock()
+ {
+ auto l= UPDATER;
+ while (!lock.compare_exchange_strong(l, l ^ (WRITER | UPDATER),
+ std::memory_order_acquire,
+ std::memory_order_relaxed))
+ {
+ DBUG_ASSERT(!(~l & (UPDATER - 1)));
+ DBUG_ASSERT(((WRITER | UPDATER) & l) == UPDATER);
+ if (~(WRITER_WAITING | UPDATER) & l)
+ return false;
+ }
+ DBUG_ASSERT((l & ~WRITER_WAITING) == UPDATER);
+ return true;
+ }
/** Wait for an exclusive lock.
@return whether the exclusive lock was acquired */
bool write_lock_poll()
{
- uint32_t l;
+ auto l= WRITER_WAITING;
if (write_lock_wait_try(l))
return true;
if (!(l & WRITER_WAITING))
@@ -88,15 +128,26 @@ public:
bool read_unlock()
{
auto l= lock.fetch_sub(1, std::memory_order_release);
- DBUG_ASSERT(~WRITER_PENDING & l); /* at least one read lock */
+ DBUG_ASSERT(~(WRITER_PENDING | UPDATER) & l); /* at least one read lock */
DBUG_ASSERT(!(l & WRITER)); /* no write lock must have existed */
return (~WRITER_PENDING & l) == 1;
}
+ /** Release an update lock.
+ @return whether any writers may have to be woken up */
+ bool update_unlock()
+ {
+ auto l= lock.fetch_and(~UPDATER, std::memory_order_release);
+ /* the update lock must have existed */
+ DBUG_ASSERT((l & (WRITER | UPDATER)) == UPDATER);
+ return !(~(WRITER_PENDING | UPDATER) & l);
+ }
/** Release an exclusive lock */
void write_unlock()
{
- IF_DBUG_ASSERT(auto l=,) lock.fetch_sub(WRITER, std::memory_order_release);
- DBUG_ASSERT(l & WRITER); /* the write lock must have existed */
+ IF_DBUG_ASSERT(auto l=,)
+ lock.fetch_and(~WRITER, std::memory_order_release);
+ /* the write lock must have existed */
+ DBUG_ASSERT((l & (WRITER | UPDATER)) == WRITER);
}
/** Try to acquire a shared lock.
@return whether the lock was acquired */
@@ -113,6 +164,9 @@ public:
/** @return whether an exclusive lock is being held by any thread */
bool is_write_locked() const
{ return !!(lock.load(std::memory_order_relaxed) & WRITER); }
+ /** @return whether an update lock is being held by any thread */
+ bool is_update_locked() const
+ { return !!(lock.load(std::memory_order_relaxed) & UPDATER); }
/** @return whether a shared lock is being held by any thread */
bool is_read_locked() const
{
diff --git a/storage/innobase/include/srw_lock.h b/storage/innobase/include/srw_lock.h
index 23098d5f51a..f31d630f2fe 100644
--- a/storage/innobase/include/srw_lock.h
+++ b/storage/innobase/include/srw_lock.h
@@ -33,7 +33,7 @@ class srw_mutex
public:
void init() { pthread_mutex_init(&lock, nullptr); }
void destroy() { pthread_mutex_destroy(&lock); }
- void wr_lock() { pthread_mutex_lock(&lock); }
+ template<bool update=false> void wr_lock() { pthread_mutex_lock(&lock); }
void wr_unlock() { pthread_mutex_unlock(&lock); }
bool wr_lock_try() { return !pthread_mutex_trylock(&lock); }
};
@@ -44,8 +44,11 @@ public:
#include "rw_lock.h"
/** Slim reader-writer lock with no recursion */
-class srw_lock_low final : protected rw_lock
+class srw_lock_low final : private rw_lock
{
+#ifdef UNIV_PFS_RWLOCK
+ friend class srw_lock;
+#endif
#ifdef SRW_LOCK_DUMMY
pthread_mutex_t mutex;
pthread_cond_t cond;
@@ -55,8 +58,12 @@ class srw_lock_low final : protected rw_lock
/** Wait for a read lock.
@param l lock word from a failed read_trylock() */
void read_lock(uint32_t l);
- /** Wait for a write lock after a failed write_trylock() */
- void write_lock();
+ /** Wait for an update lock.
+ @param l lock word from a failed update_trylock() */
+ void update_lock(uint32_t l);
+ /** Wait for a write lock after a failed write_trylock() or upgrade_trylock()
+ @param holding_u whether we already hold u_lock() */
+ void write_lock(bool holding_u);
/** Wait for signal
@param l lock word from a failed acquisition */
inline void wait(uint32_t l);
@@ -74,9 +81,15 @@ public:
#endif
bool rd_lock_try() { uint32_t l; return read_trylock(l); }
bool wr_lock_try() { return write_trylock(); }
+ template<bool update=false>
void rd_lock() { uint32_t l; if (!read_trylock(l)) read_lock(l); }
- void wr_lock() { if (!write_trylock()) write_lock(); }
+ void u_lock() { uint32_t l; if (!update_trylock(l)) update_lock(l); }
+ bool u_lock_try() { uint32_t l; return update_trylock(l); }
+ void u_wr_upgrade() { if (!upgrade_trylock()) write_lock(true); }
+ template<bool update=false>
+ void wr_lock() { if (!write_trylock()) write_lock(false); }
void rd_unlock();
+ void u_unlock();
void wr_unlock();
};
@@ -86,7 +99,7 @@ typedef srw_lock_low srw_lock;
#else
# define SRW_LOCK_INIT(key) init(key)
-/** Slim reader-writer lock with optional PERFORMANCE_SCHEMA instrumentation */
+/** Slim reader-writer lock with PERFORMANCE_SCHEMA instrumentation */
class srw_lock
{
srw_lock_low lock;
@@ -110,6 +123,7 @@ public:
}
lock.destroy();
}
+ template<bool update= false>
void rd_lock()
{
if (pfs_psi)
@@ -118,7 +132,8 @@ public:
return;
PSI_rwlock_locker_state state;
PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_rdwait)
- (&state, pfs_psi, PSI_RWLOCK_READLOCK, __FILE__, __LINE__);
+ (&state, pfs_psi, update ? PSI_RWLOCK_SHAREDLOCK : PSI_RWLOCK_READLOCK,
+ __FILE__, __LINE__);
lock.rd_lock();
if (locker)
PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
@@ -132,6 +147,29 @@ public:
PSI_RWLOCK_CALL(unlock_rwlock)(pfs_psi);
lock.rd_unlock();
}
+ void u_lock()
+ {
+ if (pfs_psi)
+ {
+ if (lock.u_lock_try())
+ return;
+ PSI_rwlock_locker_state state;
+ PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_wrwait)
+ (&state, pfs_psi, PSI_RWLOCK_SHAREDEXCLUSIVELOCK, __FILE__, __LINE__);
+ lock.u_lock();
+ if (locker)
+ PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
+ return;
+ }
+ lock.u_lock();
+ }
+ void u_unlock()
+ {
+ if (pfs_psi)
+ PSI_RWLOCK_CALL(unlock_rwlock)(pfs_psi);
+ lock.u_unlock();
+ }
+ template<bool update= false>
void wr_lock()
{
if (pfs_psi)
@@ -140,7 +178,9 @@ public:
return;
PSI_rwlock_locker_state state;
PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_wrwait)
- (&state, pfs_psi, PSI_RWLOCK_WRITELOCK, __FILE__, __LINE__);
+ (&state, pfs_psi,
+ update ? PSI_RWLOCK_EXCLUSIVELOCK : PSI_RWLOCK_WRITELOCK,
+ __FILE__, __LINE__);
lock.wr_lock();
if (locker)
PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
@@ -154,7 +194,24 @@ public:
PSI_RWLOCK_CALL(unlock_rwlock)(pfs_psi);
lock.wr_unlock();
}
+ void u_wr_upgrade()
+ {
+ if (lock.upgrade_trylock())
+ return;
+ if (pfs_psi)
+ {
+ PSI_rwlock_locker_state state;
+ PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_wrwait)
+ (&state, pfs_psi, PSI_RWLOCK_WRITELOCK, __FILE__, __LINE__);
+ lock.write_lock(true);
+ if (locker)
+ PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
+ return;
+ }
+ lock.write_lock(true);
+ }
bool rd_lock_try() { return lock.rd_lock_try(); }
+ bool u_lock_try() { return lock.u_lock_try(); }
bool wr_lock_try() { return lock.wr_lock_try(); }
};
#endif
diff --git a/storage/innobase/include/sux_lock.h b/storage/innobase/include/sux_lock.h
index 23df56688c8..fb60a7fc87b 100644
--- a/storage/innobase/include/sux_lock.h
+++ b/storage/innobase/include/sux_lock.h
@@ -24,7 +24,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
# include <set>
#endif
-#if 0 // FIXME: defined UNIV_PFS_RWLOCK
+#ifdef UNIV_PFS_RWLOCK
# define SUX_LOCK_INIT(key, level) init(key, level)
#else
# define SUX_LOCK_INIT(key, level) init(level)
@@ -35,21 +35,19 @@ S (shared), U (update, or shared-exclusive), and X (exclusive) modes
as well as recursive U and X latch acquisition */
class sux_lock final ut_d(: public latch_t)
{
- /** The first lock component for U and X modes. Only acquired in X mode. */
- srw_mutex write_lock;
- /** The owner of the U or X lock (0 if none); protected by write_lock */
+ /** The underlying non-recursive lock */
+ srw_lock lock;
+ /** The owner of the U or X lock (0 if none); protected by lock */
std::atomic<os_thread_id_t> writer;
/** Special writer!=0 value to indicate that the lock is non-recursive
and will be released by an I/O thread */
static constexpr os_thread_id_t FOR_IO= os_thread_id_t(~0UL);
- /** Numbers of U and X locks. Protected by write_lock. */
+ /** Numbers of U and X locks. Protected by lock. */
uint32_t recursive;
- /** The second component for U and X modes; the only component for S mode */
- srw_lock_low read_lock;
#ifdef UNIV_DEBUG
/** Protects readers */
mutable srw_mutex readers_lock;
- /** Threads that hold read_lock in shared mode */
+ /** Threads that hold the lock in shared mode */
std::atomic<std::set<os_thread_id_t>*> readers;
#endif
@@ -64,10 +62,9 @@ public:
void SUX_LOCK_INIT(mysql_pfs_key_t key= PFS_NOT_INSTRUMENTED,
latch_level_t level= SYNC_LEVEL_VARYING)
{
- write_lock.init();
+ lock.SRW_LOCK_INIT(key);
ut_ad(!writer.load(std::memory_order_relaxed));
ut_ad(!recursive);
- read_lock.init();
ut_d(readers_lock.init());
ut_ad(!readers.load(std::memory_order_relaxed));
ut_d(m_rw_lock= true);
@@ -92,8 +89,7 @@ public:
readers.store(nullptr, std::memory_order_relaxed);
}
#endif
- write_lock.destroy();
- read_lock.destroy();
+ lock.destroy();
ut_d(level= SYNC_UNKNOWN);
}
@@ -121,46 +117,6 @@ public:
}
private:
- /** Acquire the writer lock component (for U or X lock)
- @param for_io whether the lock will be released by another thread
- @return whether this was a recursive acquisition */
- template<bool allow_readers> bool writer_lock(bool for_io= false)
- {
- os_thread_id_t id= os_thread_get_curr_id();
- if (writer.load(std::memory_order_relaxed) == id)
- {
- ut_ad(!for_io);
- writer_recurse<allow_readers>();
- return true;
- }
- else
- {
- write_lock.wr_lock();
- ut_ad(!recursive);
- recursive= allow_readers ? RECURSIVE_U : RECURSIVE_X;
- set_first_owner(for_io ? FOR_IO : id);
- return false;
- }
- }
- /** Release the writer lock component (for U or X lock)
- @param allow_readers whether we are releasing a U lock
- @param claim_ownership whether the lock was acquired by another thread
- @return whether this was a recursive release */
- bool writer_unlock(bool allow_readers, bool claim_ownership= false)
- {
- ut_d(auto owner= writer.load(std::memory_order_relaxed));
- ut_ad(owner == os_thread_get_curr_id() ||
- (owner == FOR_IO && claim_ownership &&
- recursive == (allow_readers ? RECURSIVE_U : RECURSIVE_X)));
- ut_d(auto rec= (recursive / (allow_readers ? RECURSIVE_U : RECURSIVE_X)) &
- RECURSIVE_MAX);
- ut_ad(rec);
- if (recursive-= allow_readers ? RECURSIVE_U : RECURSIVE_X)
- return true;
- set_new_owner(0);
- write_lock.wr_unlock();
- return false;
- }
/** Transfer the ownership of a write lock to another thread
@param id the new owner of the U or X lock */
void set_new_owner(os_thread_id_t id)
@@ -228,19 +184,44 @@ public:
#endif
/** Acquire a shared lock */
- void s_lock()
- {
+ void s_lock() {
ut_ad(!have_x());
ut_ad(!have_s());
- read_lock.rd_lock();
+ lock.rd_lock<true>();
ut_d(s_lock_register());
}
/** Acquire an update lock */
- void u_lock() { if (!writer_lock<true>()) read_lock.rd_lock(); }
+ void u_lock()
+ {
+ os_thread_id_t id= os_thread_get_curr_id();
+ if (writer.load(std::memory_order_relaxed) == id)
+ writer_recurse<true>();
+ else
+ {
+ lock.u_lock();
+ ut_ad(!recursive);
+ recursive= RECURSIVE_U;
+ set_first_owner(id);
+ }
+ }
/** Acquire an exclusive lock
@param for_io whether the lock will be released by another thread */
void x_lock(bool for_io= false)
- { if (!writer_lock<false>(for_io)) read_lock.wr_lock(); }
+ {
+ os_thread_id_t id= os_thread_get_curr_id();
+ if (writer.load(std::memory_order_relaxed) == id)
+ {
+ ut_ad(!for_io);
+ writer_recurse<false>();
+ }
+ else
+ {
+ lock.wr_lock<true>();
+ ut_ad(!recursive);
+ recursive= RECURSIVE_X;
+ set_first_owner(for_io ? FOR_IO : id);
+ }
+ }
/** Acquire a recursive exclusive lock */
void x_lock_recursive() { writer_recurse<false>(); }
/** Acquire a shared lock */
@@ -264,18 +245,16 @@ public:
return false;
}
/* Upgrade the lock. */
- read_lock.rd_unlock();
- read_lock.wr_lock();
+ lock.u_wr_upgrade();
recursive/= RECURSIVE_U;
return true;
}
else
{
- write_lock.wr_lock();
+ lock.wr_lock<true>();
ut_ad(!recursive);
recursive= RECURSIVE_X;
set_first_owner(id);
- read_lock.wr_lock();
return false;
}
}
@@ -286,7 +265,7 @@ public:
/** @return whether a shared lock was acquired */
bool s_lock_try()
{
- bool acquired= read_lock.rd_lock_try();
+ bool acquired= lock.rd_lock_try();
ut_d(if (acquired) s_lock_register());
return acquired;
}
@@ -306,18 +285,12 @@ public:
writer_recurse<allow_readers>();
return true;
}
-
- if (write_lock.wr_lock_try())
+ if (allow_readers ? lock.u_lock_try() : lock.wr_lock_try())
{
ut_ad(!recursive);
- if (allow_readers ? read_lock.rd_lock_try() : read_lock.wr_lock_try())
- {
- ut_ad(!recursive);
- recursive= allow_readers ? RECURSIVE_U : RECURSIVE_X;
- set_first_owner(for_io ? FOR_IO : id);
- return true;
- }
- write_lock.wr_unlock();
+ recursive= allow_readers ? RECURSIVE_U : RECURSIVE_X;
+ set_first_owner(for_io ? FOR_IO : id);
+ return true;
}
return false;
}
@@ -342,23 +315,35 @@ public:
ut_ad(r->erase(os_thread_get_curr_id()) == 1);
readers_lock.wr_unlock();
#endif
- read_lock.rd_unlock();
+ lock.rd_unlock();
+ }
+ /** Release an update or exclusive lock
+ @param allow_readers whether we are releasing a U lock
+ @param claim_ownership whether the lock was acquired by another thread */
+ void u_or_x_unlock(bool allow_readers, bool claim_ownership= false)
+ {
+ ut_d(auto owner= writer.load(std::memory_order_relaxed));
+ ut_ad(owner == os_thread_get_curr_id() ||
+ (owner == FOR_IO && claim_ownership &&
+ recursive == (allow_readers ? RECURSIVE_U : RECURSIVE_X)));
+ ut_d(auto rec= (recursive / (allow_readers ? RECURSIVE_U : RECURSIVE_X)) &
+ RECURSIVE_MAX);
+ ut_ad(rec);
+ if (!(recursive-= allow_readers ? RECURSIVE_U : RECURSIVE_X))
+ {
+ set_new_owner(0);
+ if (allow_readers)
+ lock.u_unlock();
+ else
+ lock.wr_unlock();
+ }
}
/** Release an update lock */
void u_unlock(bool claim_ownership= false)
- { if (!writer_unlock(true, claim_ownership)) read_lock.rd_unlock(); }
+ { u_or_x_unlock(true, claim_ownership); }
/** Release an exclusive lock */
void x_unlock(bool claim_ownership= false)
- { if (!writer_unlock(false, claim_ownership)) read_lock.wr_unlock(); }
- /** Release an update or exclusive lock */
- void u_or_x_unlock(bool allow_readers= false)
- {
- if (writer_unlock(allow_readers));
- else if (allow_readers)
- read_lock.rd_unlock();
- else
- read_lock.wr_unlock();
- }
+ { u_or_x_unlock(false, claim_ownership); }
/** Count of os_waits. May not be accurate */
static constexpr uint32_t count_os_wait= 0; /* FIXME: move to dict_index_t */
diff --git a/storage/innobase/sync/srw_lock.cc b/storage/innobase/sync/srw_lock.cc
index e3ddd5d9661..8efae5be329 100644
--- a/storage/innobase/sync/srw_lock.cc
+++ b/storage/innobase/sync/srw_lock.cc
@@ -113,7 +113,7 @@ void srw_lock_low::read_lock(uint32_t l)
for (auto spin= srv_n_spin_wait_rounds; spin; spin--)
{
ut_delay(srv_spin_wait_delay);
- if (read_trylock(l))
+ if (read_trylock<true>(l))
return;
else if (l == WRITER_WAITING)
goto wake_writer;
@@ -121,11 +121,50 @@ void srw_lock_low::read_lock(uint32_t l)
wait(l);
}
- while (!read_trylock(l));
+ while (!read_trylock<true>(l));
}
-/** Wait for a write lock after a failed write_trylock() */
-void srw_lock_low::write_lock()
+/** Wait for an update lock.
+@param l lock word from a failed update_trylock()
+void srw_lock_low::update_lock(uint32_t l)
+{
+ do
+ {
+ if (l == WRITER_WAITING)
+ {
+ wake_writer:
+#ifdef SRW_LOCK_DUMMY
+ pthread_mutex_lock(&mutex);
+ {
+ pthread_cond_signal(&cond);
+ pthread_cond_wait(&cond, &mutex);
+ l= value();
+ }
+ while (l == WRITER_WAITING);
+ pthread_mutex_unlock(&mutex);
+ continue;
+#else
+ wake_one();
+#endif
+ }
+ else
+ for (auto spin= srv_n_spin_wait_rounds; spin; spin--)
+ {
+ ut_delay(srv_spin_wait_delay);
+ if (update_trylock(l))
+ return;
+ else if (l == WRITER_WAITING)
+ goto wake_writer;
+ }
+
+ wait(l);
+ }
+ while (!update_trylock(l));
+}
+
+/** Wait for a write lock after a failed write_trylock() or upgrade_trylock()
+@param holding_u whether we already hold u_lock() */
+void srw_lock_low::write_lock(bool holding_u)
{
for (;;)
{
@@ -133,6 +172,7 @@ void srw_lock_low::write_lock()
/* We are the first writer to be granted the lock. Spin for a while. */
for (auto spin= srv_n_spin_wait_rounds; spin; spin--)
{
+ l= holding_u ? WRITER_WAITING | UPDATER : WRITER_WAITING;
if (write_lock_wait_try(l))
return;
if (!(l & WRITER_WAITING))
@@ -140,13 +180,22 @@ void srw_lock_low::write_lock()
ut_delay(srv_spin_wait_delay);
}
+ l= holding_u ? WRITER_WAITING | UPDATER : WRITER_WAITING;
if (write_lock_wait_try(l))
return;
if (!(l & WRITER_WAITING))
{
- if (l == UNLOCKED && write_trylock())
- return;
+ switch (l) {
+ case UNLOCKED:
+ DBUG_ASSERT(!holding_u);
+ if (write_trylock())
+ return;
+ break;
+ case UPDATER:
+ if (holding_u && upgrade_trylock())
+ return;
+ }
l= write_lock_wait_start() | WRITER_WAITING;
}
else
@@ -157,5 +206,5 @@ void srw_lock_low::write_lock()
}
void srw_lock_low::rd_unlock() { if (read_unlock()) wake_one(); }
-
+void srw_lock_low::u_unlock() { if (update_unlock()) wake_one(); }
void srw_lock_low::wr_unlock() { write_unlock(); wake_all(); }