author    Marko Mäkelä <marko.makela@mariadb.com>  2021-01-20 10:12:04 +0200
committer Marko Mäkelä <marko.makela@mariadb.com>  2021-01-20 10:12:04 +0200
commit    e334d4dee77df6c31a8f7f0046ba4be139c48dc8 (patch)
tree      1923351a0d019189c65257940d52288f2e289a90
parent    bec347b8be9e6327583599f5e519f63ea8fc68d1 (diff)
download  mariadb-git-10.6-MDEV-20612-2-WIP.tar.gz

WIP: Partition lock_sys.latch (10.6-MDEV-20612-2-WIP)
FIXME: Fix DeadlockChecker. For now, we hard-wire innodb_deadlock_detect=OFF
(this will break a few tests, which will fail with a lock wait timeout
instead of reporting a deadlock).

TODO: Implement trx_locks_version and the optimization in lock_release().
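In outline, the change this WIP makes: lock_sys.latch is taken in shared
(read) mode for ordinary lock-queue operations, and mutual exclusion on an
individual queue comes from one of 256 shard mutexes selected by hashing the
page or table identifier; operations that need a consistent view of all
queues still take the latch exclusively. A minimal self-contained sketch of
that idea, using C++ standard library primitives as stand-ins for
srw_lock/srw_mutex (the names below are illustrative, not the InnoDB
definitions):

#include <cstddef>
#include <cstdint>
#include <mutex>
#include <shared_mutex>

// Sketch only: models the latching scheme of this patch with standard
// primitives. InnoDB uses srw_lock/srw_mutex and cache-line alignment.
struct sharded_lock_sys
{
  static constexpr std::size_t LATCHES= 256;
  std::shared_mutex latch;            // global latch; rd-mode for sharded ops
  std::mutex page_latches[LATCHES];   // one mutex per hash bucket of queues

  // Same mapping as lock_sys_t::get_page_latch() in this patch.
  static unsigned get_page_latch(std::uint32_t space, std::uint32_t page_no)
  { return unsigned((space + page_no) % LATCHES); }

  // Ordinary operation on one page's lock queue: shared global + one shard.
  template<typename Op>
  void with_page_shard(std::uint32_t space, std::uint32_t page_no, Op op)
  {
    std::shared_lock<std::shared_mutex> g(latch);
    std::lock_guard<std::mutex> s(page_latches[get_page_latch(space, page_no)]);
    op();
  }

  // Operation that must see every queue at once: exclusive global latch.
  template<typename Op>
  void with_all_shards(Op op)
  {
    std::unique_lock<std::shared_mutex> g(latch);
    op();
  }
};

Threads working on different shards then contend only on the shared-mode
acquisition of the global latch, not on one exclusive mutex.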
-rw-r--r--  mysql-test/suite/sys_vars/r/sysvars_innodb.result |   2
-rw-r--r--  storage/innobase/handler/ha_innodb.cc             |   2
-rw-r--r--  storage/innobase/include/lock0lock.h              |  88
-rw-r--r--  storage/innobase/lock/lock0lock.cc                | 122
4 files changed, 175 insertions(+), 39 deletions(-)
diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb.result b/mysql-test/suite/sys_vars/r/sysvars_innodb.result
index f23970cd6e5..9f62dcbd908 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_innodb.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_innodb.result
@@ -407,7 +407,7 @@ READ_ONLY YES
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME INNODB_DEADLOCK_DETECT
SESSION_VALUE NULL
-DEFAULT_VALUE ON
+DEFAULT_VALUE OFF
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BOOLEAN
VARIABLE_COMMENT Enable/disable InnoDB deadlock detector (default ON). if set to OFF, deadlock detection is skipped, and we rely on innodb_lock_wait_timeout in case of deadlock.
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 266845576f9..e9720310239 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -18584,7 +18584,7 @@ static MYSQL_SYSVAR_BOOL(deadlock_detect, innobase_deadlock_detect,
"Enable/disable InnoDB deadlock detector (default ON)."
" if set to OFF, deadlock detection is skipped,"
" and we rely on innodb_lock_wait_timeout in case of deadlock.",
- NULL, NULL, TRUE);
+ NULL, NULL, FALSE);
static MYSQL_SYSVAR_UINT(fill_factor, innobase_fill_factor,
PLUGIN_VAR_RQCMDARG,
diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h
index 89530544bed..341f35d8d91 100644
--- a/storage/innobase/include/lock0lock.h
+++ b/storage/innobase/include/lock0lock.h
@@ -647,7 +647,7 @@ class lock_sys_t
bool m_initialised;
/** mutex protecting the locks */
- MY_ALIGNED(CACHE_LINE_SIZE) srw_lock latch;
+ MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) srw_lock latch;
#ifdef UNIV_DEBUG
/** The owner of exclusive latch (0 if none); protected by latch */
std::atomic<os_thread_id_t> writer{0};
@@ -663,7 +663,7 @@ public:
hash_table_t prdt_page_hash;
/** mutex covering lock waits; @see trx_lock_t::wait_lock */
- MY_ALIGNED(CACHE_LINE_SIZE) mysql_mutex_t wait_mutex;
+ MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) mysql_mutex_t wait_mutex;
private:
/** Pending number of lock waits; protected by wait_mutex */
ulint wait_pending;
@@ -673,6 +673,20 @@ private:
ulint wait_time;
/** Longest wait time; protected by wait_mutex */
ulint wait_time_max;
+
+ /** Number of page_latches and table_latches */
+ static constexpr size_t LATCHES= 256;
+
+ /** Protection of rec_hash, prdt_hash, prdt_page_hash with latch.rd_lock() */
+ MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) srw_mutex page_latches[LATCHES];
+ /** Protection of table locks together with latch.rd_lock() */
+ MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) srw_mutex table_latches[LATCHES];
+#ifdef UNIV_DEBUG
+ MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE)
+ Atomic_relaxed<os_thread_id_t> page_latch_owners[LATCHES];
+ MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE)
+ Atomic_relaxed<os_thread_id_t> table_latch_owners[LATCHES];
+#endif
public:
/**
Constructor.
@@ -734,20 +748,56 @@ public:
std::memory_order_relaxed));
return true;
}
+
+ /** Acquire a table lock mutex.
+ @return parameter to unlock_table_latch() that the caller must invoke */
+ inline unsigned lock_table_latch(table_id_t id);
+ /** Get a page lock mutex.
+ @return parameter to lock_page_latch() and unlock_page_latch() */
+ static unsigned get_page_latch(page_id_t id)
+ { return (id.space() + id.page_no()) % LATCHES; }
+ /** Acquire a page lock mutex. */
+ inline void lock_page_latch(unsigned shard);
+
+ /** Acquire a page lock mutex.
+ @return parameter to unlock_page_latch() that the caller must invoke */
+ unsigned lock_page_latch(page_id_t id);
+
+ /** Release a page latch mutex */
+ void unlock_page_latch(unsigned shard)
+ {
+ ut_ad(shard < LATCHES);
+ ut_ad(page_latch_owners[shard] == os_thread_get_curr_id());
+ ut_d(page_latch_owners[shard]= 0);
+ page_latches[shard].wr_unlock();
+ }
+ /** Release a table latch mutex */
+ void unlock_table_latch(unsigned shard)
+ {
+ ut_ad(shard < LATCHES);
+ ut_ad(table_latch_owners[shard] == os_thread_get_curr_id());
+ ut_d(table_latch_owners[shard]= 0);
+ table_latches[shard].wr_unlock();
+ }
+
+private:
+ /** @return whether the current thread is the lock_sys.latch writer */
+ bool is_writer() const
+ { return writer.load(std::memory_order_relaxed) == os_thread_get_curr_id(); }
+public:
/** Assert that wr_lock() has been invoked by this thread */
- void assert_locked() const
- { ut_ad(writer.load(std::memory_order_relaxed) == os_thread_get_curr_id()); }
+ void assert_locked() const { ut_ad(is_writer()); }
/** Assert that wr_lock() has not been invoked by this thread */
- void assert_unlocked() const
- { ut_ad(writer.load(std::memory_order_relaxed) != os_thread_get_curr_id()); }
- /** Assert that a page shard is exclusively latched by this thread */
- void assert_locked(const page_id_t) const { assert_locked(); }
+ void assert_unlocked() const { ut_ad(!is_writer()); }
#ifdef UNIV_DEBUG
+ /** Assert that a page shard is exclusively latched by this thread */
+ void assert_locked(const page_id_t id) const;
/** Assert that a lock shard is exclusively latched by this thread */
void assert_locked(const lock_t &lock) const;
/** Assert that a table lock shard is exclusively latched by this thread */
void assert_locked(const dict_table_t &table) const;
#else
+ void assert_locked(const page_id_t) const {}
void assert_locked(const lock_t &) const {}
void assert_locked(const dict_table_t &) const {}
#endif
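Note that get_page_latch() maps a page to its shard with
(space + page_no) % 256, so distinct pages routinely share a shard: each
mutex protects a hash bucket of lock queues, not a single page. A
hypothetical illustration of the mapping:

// Hypothetical page ids: (space=1, page_no=3) and (space=2, page_no=2)
// both hash to shard (1+3) % 256 == (2+2) % 256 == 4 and therefore
// share one shard mutex; (space=1, page_no=4) lands on shard 5.
static_assert((1 + 3) % 256 == 4, "same shard");
static_assert((2 + 2) % 256 == 4, "same shard");
static_assert((1 + 4) % 256 == 5, "different shard");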
@@ -839,26 +889,24 @@ struct LockMutexGuard
~LockMutexGuard() { lock_sys.wr_unlock(); }
};
-/** lock_sys.latch guard for a dict_table_t::id shard */
-struct LockTableGuard
-{
- LockTableGuard(const dict_table_t &) { lock_sys.wr_lock(SRW_LOCK_CALL); }
- ~LockTableGuard() { lock_sys.wr_unlock(); }
-};
-
/** lock_sys.latch guard for a page_id_t shard */
struct LockGuard
{
- LockGuard(const page_id_t) { lock_sys.wr_lock(SRW_LOCK_CALL); }
- ~LockGuard() { lock_sys.wr_unlock(); }
+ LockGuard(const page_id_t id);
+ ~LockGuard() { lock_sys.rd_unlock(); lock_sys.unlock_page_latch(shard); }
+private:
+ /** The shard */
+ unsigned shard;
};
/** lock_sys.latch guard for 2 page_id_t shards */
struct LockMultiGuard
{
- LockMultiGuard(const page_id_t, const page_id_t)
- { lock_sys.wr_lock(SRW_LOCK_CALL); }
- ~LockMultiGuard() { lock_sys.wr_unlock(); }
+ LockMultiGuard(const page_id_t id1, const page_id_t id2);
+ ~LockMultiGuard();
+private:
+ /** The shards */
+ unsigned shard1, shard2;
};
/*********************************************************************//**
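Call sites are meant to go through these RAII guards rather than the latch
functions directly. A hedged usage sketch (the function below is
hypothetical, not part of this patch):

// Hypothetical call site. Constructing the guard takes lock_sys.latch in
// shared mode plus the shard mutex covering page_id; the destructor
// releases both on scope exit, including on early return.
static void hypothetical_rec_lock_op(const page_id_t page_id)
{
  LockGuard g{page_id};
  /* operate on the lock queue of page_id here;
  lock_sys.assert_locked(page_id) would hold */
}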
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index 385f2b05262..a8d21134d48 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -53,6 +53,98 @@ Created 5/7/1996 Heikki Tuuri
/** The value of innodb_deadlock_detect */
my_bool innobase_deadlock_detect;
+#ifdef UNIV_DEBUG
+/** Assert that a lock shard is exclusively latched by this thread */
+void lock_sys_t::assert_locked(const lock_t &lock) const
+{
+ if (is_writer())
+ return;
+ if (lock.is_table())
+ assert_locked(*lock.un_member.tab_lock.table);
+ else
+ assert_locked(lock.un_member.rec_lock.page_id);
+}
+
+/** Assert that a table lock shard is exclusively latched by this thread */
+void lock_sys_t::assert_locked(const dict_table_t &table) const
+{
+ ut_ad(!table.is_temporary());
+
+ const os_thread_id_t current_thread= os_thread_get_curr_id();
+ if (writer.load(std::memory_order_relaxed) == current_thread)
+ return;
+ ut_ad(readers);
+ ut_ad(current_thread == table_latch_owners[table.id % LATCHES]);
+}
+
+/** Assert that a page shard is exclusively latched by this thread */
+void lock_sys_t::assert_locked(const page_id_t id) const
+{
+ const os_thread_id_t current_thread= os_thread_get_curr_id();
+ if (writer.load(std::memory_order_relaxed) == current_thread)
+ return;
+ ut_ad(readers);
+ ut_ad(current_thread == page_latch_owners[get_page_latch(id)]);
+}
+#endif
+
+/** Acquire a table lock mutex.
+@return parameter to unlock_table_latch() that the caller must invoke */
+inline unsigned lock_sys_t::lock_table_latch(table_id_t id)
+{
+ unsigned shard= static_cast<unsigned>(id % LATCHES);
+ ut_ad(readers);
+ table_latches[shard].wr_lock();
+ ut_ad(!table_latch_owners[shard]);
+ ut_d(table_latch_owners[shard]= os_thread_get_curr_id());
+ return shard;
+}
+
+/** Acquire a page lock mutex. */
+inline void lock_sys_t::lock_page_latch(unsigned shard)
+{
+ ut_ad(readers);
+ page_latches[shard].wr_lock();
+ ut_ad(!page_latch_owners[shard]);
+ ut_d(page_latch_owners[shard]= os_thread_get_curr_id());
+}
+
+/** Acquire a page lock mutex.
+@return parameter to unlock_page_latch() that the caller must invoke */
+inline unsigned lock_sys_t::lock_page_latch(page_id_t id)
+{
+ unsigned shard= get_page_latch(id);
+ lock_page_latch(shard);
+ return shard;
+}
+
+LockGuard::LockGuard(page_id_t id)
+{
+ lock_sys.rd_lock(SRW_LOCK_CALL);
+ shard= lock_sys.lock_page_latch(id);
+}
+
+LockMultiGuard::LockMultiGuard(const page_id_t id1, const page_id_t id2)
+{
+ ut_ad(id1.space() == id2.space());
+ shard1= lock_sys.get_page_latch(id1);
+ shard2= lock_sys.get_page_latch(id2);
+ if (shard1 > shard2)
+ std::swap(shard1, shard2);
+ lock_sys.rd_lock(SRW_LOCK_CALL);
+ lock_sys.lock_page_latch(shard1);
+ if (shard1 != shard2)
+ lock_sys.lock_page_latch(shard2);
+}
+
+LockMultiGuard::~LockMultiGuard()
+{
+ lock_sys.rd_unlock();
+ lock_sys.unlock_page_latch(shard1);
+ if (shard1 != shard2)
+ lock_sys.unlock_page_latch(shard2);
+}
+
extern "C" void thd_rpl_deadlock_check(MYSQL_THD thd, MYSQL_THD other_thd);
extern "C" int thd_need_wait_reports(const MYSQL_THD thd);
extern "C" int thd_need_ordering_with(const MYSQL_THD thd, const MYSQL_THD other_thd);
@@ -429,6 +521,10 @@ void lock_sys_t::create(ulint n_cells)
m_initialised= true;
latch.SRW_LOCK_INIT(lock_latch_key);
+ for (size_t i= 0; i < LATCHES; i++)
+ page_latches[i].init();
+ for (size_t i= 0; i < LATCHES; i++)
+ table_latches[i].init();
mysql_mutex_init(lock_wait_mutex_key, &wait_mutex, nullptr);
rec_hash.create(n_cells);
@@ -442,7 +538,6 @@ void lock_sys_t::create(ulint n_cells)
}
}
-
#ifdef UNIV_PFS_RWLOCK
/** Acquire exclusive lock_sys.latch */
void lock_sys_t::wr_lock(const char *file, unsigned line)
@@ -534,6 +629,10 @@ void lock_sys_t::close()
prdt_page_hash.free();
latch.destroy();
+ for (size_t i= 0; i < LATCHES; i++)
+ page_latches[i].destroy();
+ for (size_t i= 0; i < LATCHES; i++)
+ table_latches[i].destroy();
mysql_mutex_destroy(&wait_mutex);
m_initialised= false;
@@ -1151,20 +1250,6 @@ wsrep_print_wait_locks(
}
#endif /* WITH_WSREP */
-#ifdef UNIV_DEBUG
-/** Assert that a lock shard is exclusively latched by this thread */
-void lock_sys_t::assert_locked(const lock_t &) const
-{
- assert_locked();
-}
-
-/** Assert that a table lock shard is exclusively latched by this thread */
-void lock_sys_t::assert_locked(const dict_table_t &) const
-{
- assert_locked();
-}
-#endif
-
/** Reset the wait status of a lock.
@param[in,out] lock lock that was possibly being waited for */
static void lock_reset_lock_and_trx_wait(lock_t *lock)
@@ -3454,7 +3539,8 @@ lock_table(
trx_set_rw_mode(trx);
}
- LockTableGuard g{*table};
+ lock_sys.rd_lock(SRW_LOCK_CALL);
+ const unsigned shard = lock_sys.lock_table_latch(table->id);
/* We have to check if the new lock is compatible with any locks
other transactions have in the table lock queue. */
@@ -3482,7 +3568,9 @@ lock_table(
err = DB_SUCCESS;
}
+ lock_sys.rd_unlock();
trx->mutex.wr_unlock();
+ lock_sys.unlock_table_latch(shard);
return(err);
}
@@ -6086,7 +6174,6 @@ or there is no deadlock (any more) */
const trx_t*
DeadlockChecker::check_and_resolve(const lock_t* lock, trx_t* trx)
{
- lock_sys.assert_locked();
check_trx_state(trx);
ut_ad(!srv_read_only_mode);
@@ -6094,6 +6181,7 @@ DeadlockChecker::check_and_resolve(const lock_t* lock, trx_t* trx)
return(NULL);
}
+ lock_sys.assert_locked();
/* Release the mutex to obey the latching order.
This is safe, because DeadlockChecker::check_and_resolve()
is invoked when a lock wait is enqueued for the currently