author     Marko Mäkelä <marko.makela@mariadb.com>  2021-02-05 18:36:46 +0200
committer  Marko Mäkelä <marko.makela@mariadb.com>  2021-02-05 18:36:46 +0200
commit     82179f5e83dfe671d2b2d25ed313a1c48c4e31cb (patch)
tree       a13d9c8c5fce82b80a85bee214d0feb232554f48
parent     c7620328cc66d9d6a721d9066f470ac06affed4d (diff)
download   mariadb-git-82179f5e83dfe671d2b2d25ed313a1c48c4e31cb.tar.gz
MDEV-20612 preparation: LockMutexGuard, LockGuard
Let us use the RAII wrapper LockMutexGuard for most operations where lock_sys.mutex is acquired. Let us use the RAII wrapper LockGuard for operations that are related to a single buffer pool page.
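For orientation, a minimal usage sketch of the pattern this change introduces (the guard definitions appear in the storage/innobase/include/lock0lock.h hunk below; the function and its body here are illustrative only and not part of the patch):

    /* Illustrative sketch only: the guard releases lock_sys.mutex on every
    return path, which is what lets the explicit unlock calls be removed
    at the call sites changed in this patch. */
    static void example_discard(const buf_block_t *block)
    {
      const page_id_t id{block->page.id()};
      LockGuard g{id};      /* replaces lock_sys.mutex_lock() */
      lock_rec_free_all_from_discard_page(id);
    }                       /* ~LockGuard() invokes lock_sys.mutex_unlock() */

At this stage both wrappers still acquire the single global lock_sys.mutex; LockGuard merely records the page identity at the call site so that the later MDEV-20612 work can shard the latch without touching these callers again.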
-rw-r--r--  storage/innobase/btr/btr0btr.cc          6
-rw-r--r--  storage/innobase/btr/btr0cur.cc         11
-rw-r--r--  storage/innobase/buf/buf0buf.cc          3
-rw-r--r--  storage/innobase/gis/gis0sea.cc         28
-rw-r--r--  storage/innobase/handler/ha_innodb.cc   31
-rw-r--r--  storage/innobase/ibuf/ibuf0ibuf.cc       6
-rw-r--r--  storage/innobase/include/lock0lock.h    16
-rw-r--r--  storage/innobase/include/lock0priv.h     2
-rw-r--r--  storage/innobase/include/lock0priv.ic    6
-rw-r--r--  storage/innobase/include/trx0trx.h       2
-rw-r--r--  storage/innobase/lock/lock0lock.cc     526
-rw-r--r--  storage/innobase/lock/lock0prdt.cc     194
-rw-r--r--  storage/innobase/row/row0ins.cc         11
-rw-r--r--  storage/innobase/row/row0mysql.cc        7
-rw-r--r--  storage/innobase/trx/trx0i_s.cc          5
-rw-r--r--  storage/innobase/trx/trx0trx.cc         31
16 files changed, 364 insertions, 521 deletions
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index b95e41374a2..22da9ab63d5 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -3355,7 +3355,7 @@ btr_lift_page_up(
const page_id_t id{block->page.id()};
/* Free predicate page locks on the block */
if (index->is_spatial()) {
- LockMutexGuard g;
+ LockGuard g{id};
lock_prdt_page_free_from_discard(
id, &lock_sys.prdt_page_hash);
}
@@ -3609,7 +3609,7 @@ retry:
}
/* No GAP lock needs to be worrying about */
- LockMutexGuard g;
+ LockGuard g{id};
lock_prdt_page_free_from_discard(
id, &lock_sys.prdt_page_hash);
lock_rec_free_all_from_discard_page(id);
@@ -3762,7 +3762,7 @@ retry:
merge_page, mtr);
}
const page_id_t id{block->page.id()};
- LockMutexGuard g;
+ LockGuard g{id};
lock_prdt_page_free_from_discard(
id, &lock_sys.prdt_page_hash);
lock_rec_free_all_from_discard_page(id);
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index c127d326c69..2b7bdd6176e 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -1998,11 +1998,12 @@ retry_page_get:
trx_t* trx = thr_get_trx(cursor->thr);
lock_prdt_t prdt;
- lock_sys.mutex_lock();
- lock_init_prdt_from_mbr(
- &prdt, &cursor->rtr_info->mbr, mode,
- trx->lock.lock_heap);
- lock_sys.mutex_unlock();
+ {
+ LockMutexGuard g;
+ lock_init_prdt_from_mbr(
+ &prdt, &cursor->rtr_info->mbr, mode,
+ trx->lock.lock_heap);
+ }
if (rw_latch == RW_NO_LATCH && height != 0) {
block->lock.s_lock();
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index fc9816d02a1..df01c675a9e 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -2038,9 +2038,8 @@ withdraw_retry:
{found, withdraw_started, my_hrtime_coarse()};
withdraw_started = current_time;
- lock_sys.mutex_lock();
+ LockMutexGuard g;
trx_sys.trx_list.for_each(f);
- lock_sys.mutex_unlock();
}
if (should_retry_withdraw) {
diff --git a/storage/innobase/gis/gis0sea.cc b/storage/innobase/gis/gis0sea.cc
index 1f08b224e1f..cadf30221fa 100644
--- a/storage/innobase/gis/gis0sea.cc
+++ b/storage/innobase/gis/gis0sea.cc
@@ -386,11 +386,12 @@ rtr_pcur_getnext_from_path(
trx_t* trx = thr_get_trx(
btr_cur->rtr_info->thr);
- lock_sys.mutex_lock();
- lock_init_prdt_from_mbr(
- &prdt, &btr_cur->rtr_info->mbr,
- mode, trx->lock.lock_heap);
- lock_sys.mutex_unlock();
+ {
+ LockMutexGuard g;
+ lock_init_prdt_from_mbr(
+ &prdt, &btr_cur->rtr_info->mbr,
+ mode, trx->lock.lock_heap);
+ }
if (rw_latch == RW_NO_LATCH) {
block->lock.s_lock();
@@ -1182,24 +1183,21 @@ rtr_check_discard_page(
}
mysql_mutex_unlock(&rtr_info->rtr_path_mutex);
- if (rtr_info->matches) {
- mysql_mutex_lock(&rtr_info->matches->rtr_match_mutex);
+ if (auto matches = rtr_info->matches) {
+ mysql_mutex_lock(&matches->rtr_match_mutex);
- if ((&rtr_info->matches->block)->page.id() == id) {
- if (!rtr_info->matches->matched_recs->empty()) {
- rtr_info->matches->matched_recs->clear();
- }
- ut_ad(rtr_info->matches->matched_recs->empty());
- rtr_info->matches->valid = false;
+ if (matches->block.page.id() == id) {
+ matches->matched_recs->clear();
+ matches->valid = false;
}
- mysql_mutex_unlock(&rtr_info->matches->rtr_match_mutex);
+ mysql_mutex_unlock(&matches->rtr_match_mutex);
}
}
mysql_mutex_unlock(&index->rtr_track->rtr_active_mutex);
- LockMutexGuard g;
+ LockGuard g{id};
lock_prdt_page_free_from_discard(id, &lock_sys.prdt_hash);
lock_prdt_page_free_from_discard(id, &lock_sys.prdt_page_hash);
}
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index e47b4699893..0ba81e23145 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -2692,17 +2692,18 @@ static bool innobase_query_caching_table_check_low(
For read-only transaction: should satisfy (1) and (3)
For read-write transaction: should satisfy (1), (2), (3) */
- if (lock_table_get_n_locks(table)) {
+ if (trx->id && trx->id < table->query_cache_inv_trx_id) {
return false;
}
- if (trx->id && trx->id < table->query_cache_inv_trx_id) {
+ if (trx->read_view.is_open()
+ && trx->read_view.low_limit_id()
+ < table->query_cache_inv_trx_id) {
return false;
}
- return !trx->read_view.is_open()
- || trx->read_view.low_limit_id()
- >= table->query_cache_inv_trx_id;
+ LockMutexGuard g;
+ return UT_LIST_GET_LEN(table->locks) == 0;
}
/** Checks if MySQL at the moment is allowed for this table to retrieve a
@@ -4474,16 +4475,17 @@ static void innobase_kill_query(handlerton*, THD *thd, enum thd_kill_levels)
#endif /* WITH_WSREP */
if (trx->lock.wait_lock)
{
- lock_sys.mutex_lock();
- mysql_mutex_lock(&lock_sys.wait_mutex);
- if (lock_t *lock= trx->lock.wait_lock)
{
- trx->mutex_lock();
- trx->error_state= DB_INTERRUPTED;
- lock_cancel_waiting_and_release(lock);
- trx->mutex_unlock();
+ LockMutexGuard g;
+ mysql_mutex_lock(&lock_sys.wait_mutex);
+ if (lock_t *lock= trx->lock.wait_lock)
+ {
+ trx->mutex_lock();
+ trx->error_state= DB_INTERRUPTED;
+ lock_cancel_waiting_and_release(lock);
+ trx->mutex_unlock();
+ }
}
- lock_sys.mutex_unlock();
mysql_mutex_unlock(&lock_sys.wait_mutex);
}
}
@@ -18110,11 +18112,10 @@ wsrep_abort_transaction(
wsrep_thd_transaction_state_str(victim_thd));
if (victim_trx) {
- lock_sys.mutex_lock();
+ LockMutexGuard g;
victim_trx->mutex_lock();
int rcode= wsrep_innobase_kill_one_trx(bf_thd,
victim_trx, signal);
- lock_sys.mutex_unlock();
victim_trx->mutex_unlock();
DBUG_RETURN(rcode);
} else {
diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index 97269fe90db..c3fbc2696f1 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -3280,10 +3280,8 @@ commit_exit:
ibuf_mtr_commit(&bitmap_mtr);
goto fail_exit;
} else {
- lock_sys.mutex_lock();
- const auto lock_exists = lock_sys.get_first(page_id);
- lock_sys.mutex_unlock();
- if (lock_exists) {
+ LockGuard g{page_id};
+ if (lock_sys.get_first(page_id)) {
goto commit_exit;
}
}
diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h
index 880eaf5fc96..17ca53dcda0 100644
--- a/storage/innobase/include/lock0lock.h
+++ b/storage/innobase/include/lock0lock.h
@@ -539,13 +539,6 @@ dberr_t
lock_trx_handle_wait(
/*=================*/
trx_t* trx); /*!< in/out: trx lock state */
-/*********************************************************************//**
-Get the number of locks on a table.
-@return number of locks */
-ulint
-lock_table_get_n_locks(
-/*===================*/
- const dict_table_t* table); /*!< in: table */
/*******************************************************************//**
Initialise the trx lock list. */
void
@@ -649,6 +642,8 @@ public:
void mutex_assert_locked() const { mysql_mutex_assert_owner(&mutex); }
/** Assert that mutex_lock() has not been invoked */
void mutex_assert_unlocked() const { mysql_mutex_assert_not_owner(&mutex); }
+ /** Assert that a page shard is exclusively latched */
+ void assert_locked(const page_id_t) { mutex_assert_locked(); }
/** Wait for a lock to be granted */
void wait_lock(lock_t **lock, mysql_cond_t *cond)
@@ -740,6 +735,13 @@ struct LockMutexGuard
~LockMutexGuard() { lock_sys.mutex_unlock(); }
};
+/** lock_sys.mutex guard for a page_id_t shard */
+struct LockGuard
+{
+ LockGuard(const page_id_t) { lock_sys.mutex_lock(); }
+ ~LockGuard() { lock_sys.mutex_unlock(); }
+};
+
/*********************************************************************//**
Creates a new record lock and inserts it to the lock queue. Does NOT check
for deadlocks or lock compatibility!
diff --git a/storage/innobase/include/lock0priv.h b/storage/innobase/include/lock0priv.h
index d9dbbf5dbc4..5b800deda91 100644
--- a/storage/innobase/include/lock0priv.h
+++ b/storage/innobase/include/lock0priv.h
@@ -481,7 +481,7 @@ lock_rec_set_nth_bit(
inline byte lock_rec_reset_nth_bit(lock_t* lock, ulint i)
{
ut_ad(!lock->is_table());
- lock_sys.mutex_assert_locked();
+ lock_sys.assert_locked(lock->un_member.rec_lock.page_id);
ut_ad(i < lock->un_member.rec_lock.n_bits);
byte* b = reinterpret_cast<byte*>(&lock[1]) + (i >> 3);
diff --git a/storage/innobase/include/lock0priv.ic b/storage/innobase/include/lock0priv.ic
index 65d0cd04610..30557c72af0 100644
--- a/storage/innobase/include/lock0priv.ic
+++ b/storage/innobase/include/lock0priv.ic
@@ -78,7 +78,7 @@ lock_rec_set_nth_bit(
ulint bit_index;
ut_ad(!lock->is_table());
- lock_sys.mutex_assert_locked();
+ lock_sys.assert_locked(lock->un_member.rec_lock.page_id);
ut_ad(i < lock->un_member.rec_lock.n_bits);
byte_index = i / 8;
@@ -117,7 +117,7 @@ lock_rec_get_next(
ulint heap_no,/*!< in: heap number of the record */
lock_t* lock) /*!< in: lock */
{
- lock_sys.mutex_assert_locked();
+ lock_sys.assert_locked(lock->un_member.rec_lock.page_id);
do {
lock = lock_rec_get_next_on_page(lock);
@@ -175,7 +175,7 @@ lock_rec_get_next_on_page_const(
ut_ad(!lock->is_table());
const page_id_t page_id{lock->un_member.rec_lock.page_id};
- lock_sys.mutex_assert_locked();
+ lock_sys.assert_locked(page_id);
while (!!(lock= static_cast<const lock_t*>(HASH_GET_NEXT(hash, lock))))
if (lock->un_member.rec_lock.page_id == page_id)
diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h
index a5c0617cc62..9ab7a37d636 100644
--- a/storage/innobase/include/trx0trx.h
+++ b/storage/innobase/include/trx0trx.h
@@ -485,7 +485,7 @@ struct trx_lock_t
/** List of pending trx_t::evict_table() */
UT_LIST_BASE_NODE_T(dict_table_t) evicted_tables;
- /** number of record locks; writes are protected by lock_sys.mutex */
+ /** number of record locks; writers use LockGuard or LockMutexGuard */
ulint n_rec_locks;
};
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index d0a957a7ff0..e0501ffbdbd 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -263,13 +263,8 @@ ib_uint64_t DeadlockChecker::s_lock_mark_counter = 0;
DeadlockChecker::state_t DeadlockChecker::s_states[4096];
#ifdef UNIV_DEBUG
-/*********************************************************************//**
-Validates the lock system.
-@return TRUE if ok */
-static
-bool
-lock_validate();
-/*============*/
+/** Validate the transactional locks. */
+static void lock_validate();
/** Validate the record lock queues on a page.
@param block buffer pool block
@@ -390,7 +385,7 @@ void lock_sys_t::resize(ulint n_cells)
{
ut_ad(this == &lock_sys);
- mutex_lock();
+ LockMutexGuard g;
hash_table_t old_hash(rec_hash);
rec_hash.create(n_cells);
@@ -409,7 +404,6 @@ void lock_sys_t::resize(ulint n_cells)
HASH_MIGRATE(&old_hash, &prdt_page_hash, lock_t, hash,
lock_rec_lock_fold);
old_hash.free();
- mutex_unlock();
}
@@ -832,8 +826,6 @@ lock_rec_other_has_expl_req(
requests by all transactions
are taken into account */
{
-
- lock_sys.mutex_assert_locked();
ut_ad(mode == LOCK_X || mode == LOCK_S);
/* Only GAP lock can be on SUPREMUM, and we are not looking for
@@ -934,13 +926,11 @@ lock_rec_other_has_conflicting(
ulint heap_no,/*!< in: heap number of the record */
const trx_t* trx) /*!< in: our transaction */
{
- lock_t* lock;
-
- lock_sys.mutex_assert_locked();
+ lock_sys.assert_locked(id);
bool is_supremum = (heap_no == PAGE_HEAP_NO_SUPREMUM);
- for (lock = lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
+ for (lock_t*lock = lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
lock != NULL;
lock = lock_rec_get_next(heap_no, lock)) {
@@ -1551,7 +1541,7 @@ lock_rec_lock(
MONITOR_ATOMIC_INC(MONITOR_NUM_RECLOCK_REQ);
const page_id_t id{block->page.id()};
- LockMutexGuard g;
+ LockGuard g{id};
if (lock_t *lock= lock_sys.get_first(id))
{
@@ -1889,9 +1879,6 @@ lock_rec_cancel(
/*============*/
lock_t* lock) /*!< in: waiting record lock request */
{
- ut_ad(!lock->is_table());
- lock_sys.mutex_assert_locked();
-
/* Reset the bit (there can be only one set bit) in the lock bitmap */
lock_rec_reset_nth_bit(lock, lock_rec_find_set_bit(lock));
@@ -2086,7 +2073,8 @@ lock_rec_inherit_to_gap(
ulint heap_no) /*!< in: heap_no of the
donating record */
{
- lock_sys.mutex_assert_locked();
+ lock_sys.assert_locked(id);
+ lock_sys.assert_locked(heir);
/* At READ UNCOMMITTED or READ COMMITTED isolation level,
we do not want locks set
@@ -2127,7 +2115,7 @@ lock_rec_inherit_to_gap_if_gap_lock(
on this record */
{
const page_id_t id{block->page.id()};
- LockMutexGuard g;
+ LockGuard g{id};
for (lock_t *lock= lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
lock; lock= lock_rec_get_next(heap_no, lock))
@@ -2259,124 +2247,122 @@ lock_move_reorganize_page(
const buf_block_t* oblock) /*!< in: copy of the old, not
reorganized page */
{
- lock_t* lock;
- UT_LIST_BASE_NODE_T(lock_t) old_locks;
- mem_heap_t* heap = NULL;
- ulint comp;
- const page_id_t id{block->page.id()};
+ mem_heap_t *heap;
- lock_sys.mutex_lock();
-
- /* FIXME: This needs to deal with predicate lock too */
- lock = lock_sys.get_first(id);
+ {
+ UT_LIST_BASE_NODE_T(lock_t) old_locks;
+ UT_LIST_INIT(old_locks, &lock_t::trx_locks);
- if (lock == NULL) {
- lock_sys.mutex_unlock();
+ const page_id_t id{block->page.id()};
+ LockGuard g{id};
- return;
- }
+ /* FIXME: This needs to deal with predicate lock too */
+ lock_t *lock= lock_sys.get_first(id);
- heap = mem_heap_create(256);
+ if (!lock)
+ return;
- /* Copy first all the locks on the page to heap and reset the
- bitmaps in the original locks; chain the copies of the locks
- using the trx_locks field in them. */
+ heap= mem_heap_create(256);
- UT_LIST_INIT(old_locks, &lock_t::trx_locks);
+ /* Copy first all the locks on the page to heap and reset the
+ bitmaps in the original locks; chain the copies of the locks
+ using the trx_locks field in them. */
- do {
- /* Make a copy of the lock */
- lock_t* old_lock = lock_rec_copy(lock, heap);
+ do
+ {
+ /* Make a copy of the lock */
+ lock_t *old_lock= lock_rec_copy(lock, heap);
- UT_LIST_ADD_LAST(old_locks, old_lock);
+ UT_LIST_ADD_LAST(old_locks, old_lock);
- /* Reset bitmap of lock */
- lock_rec_bitmap_reset(lock);
+ /* Reset bitmap of lock */
+ lock_rec_bitmap_reset(lock);
- if (lock->is_waiting()) {
- ut_ad(lock->trx->lock.wait_lock == lock);
- lock->type_mode &= ~LOCK_WAIT;
- }
+ if (lock->is_waiting())
+ {
+ ut_ad(lock->trx->lock.wait_lock == lock);
+ lock->type_mode&= ~LOCK_WAIT;
+ }
- lock = lock_rec_get_next_on_page(lock);
- } while (lock != NULL);
+ lock= lock_rec_get_next_on_page(lock);
+ }
+ while (lock);
- comp = page_is_comp(block->frame);
- ut_ad(comp == page_is_comp(oblock->frame));
+ const ulint comp= page_is_comp(block->frame);
+ ut_ad(comp == page_is_comp(oblock->frame));
- lock_move_granted_locks_to_front(old_locks);
+ lock_move_granted_locks_to_front(old_locks);
- DBUG_EXECUTE_IF("do_lock_reverse_page_reorganize",
- ut_list_reverse(old_locks););
+ DBUG_EXECUTE_IF("do_lock_reverse_page_reorganize",
+ ut_list_reverse(old_locks););
- for (lock = UT_LIST_GET_FIRST(old_locks); lock;
- lock = UT_LIST_GET_NEXT(trx_locks, lock)) {
+ for (lock= UT_LIST_GET_FIRST(old_locks); lock;
+ lock= UT_LIST_GET_NEXT(trx_locks, lock))
+ {
+ /* NOTE: we copy also the locks set on the infimum and
+ supremum of the page; the infimum may carry locks if an
+ update of a record is occurring on the page, and its locks
+ were temporarily stored on the infimum */
+ const rec_t *rec1= page_get_infimum_rec(block->frame);
+ const rec_t *rec2= page_get_infimum_rec(oblock->frame);
+
+ /* Set locks according to old locks */
+ for (;;)
+ {
+ ulint old_heap_no;
+ ulint new_heap_no;
+ ut_d(const rec_t* const orec= rec1);
+ ut_ad(page_rec_is_metadata(rec1) == page_rec_is_metadata(rec2));
- /* NOTE: we copy also the locks set on the infimum and
- supremum of the page; the infimum may carry locks if an
- update of a record is occurring on the page, and its locks
- were temporarily stored on the infimum */
- const rec_t* rec1 = page_get_infimum_rec(
- buf_block_get_frame(block));
- const rec_t* rec2 = page_get_infimum_rec(
- buf_block_get_frame(oblock));
-
- /* Set locks according to old locks */
- for (;;) {
- ulint old_heap_no;
- ulint new_heap_no;
- ut_d(const rec_t* const orec = rec1);
- ut_ad(page_rec_is_metadata(rec1)
- == page_rec_is_metadata(rec2));
-
- if (comp) {
- old_heap_no = rec_get_heap_no_new(rec2);
- new_heap_no = rec_get_heap_no_new(rec1);
-
- rec1 = page_rec_get_next_low(rec1, TRUE);
- rec2 = page_rec_get_next_low(rec2, TRUE);
- } else {
- old_heap_no = rec_get_heap_no_old(rec2);
- new_heap_no = rec_get_heap_no_old(rec1);
- ut_ad(!memcmp(rec1, rec2,
- rec_get_data_size_old(rec2)));
-
- rec1 = page_rec_get_next_low(rec1, FALSE);
- rec2 = page_rec_get_next_low(rec2, FALSE);
- }
+ if (comp)
+ {
+ old_heap_no= rec_get_heap_no_new(rec2);
+ new_heap_no= rec_get_heap_no_new(rec1);
- /* Clear the bit in old_lock. */
- if (old_heap_no < lock->un_member.rec_lock.n_bits
- && lock_rec_reset_nth_bit(lock, old_heap_no)) {
- ut_ad(!page_rec_is_metadata(orec));
+ rec1= page_rec_get_next_low(rec1, TRUE);
+ rec2= page_rec_get_next_low(rec2, TRUE);
+ }
+ else
+ {
+ old_heap_no= rec_get_heap_no_old(rec2);
+ new_heap_no= rec_get_heap_no_old(rec1);
+ ut_ad(!memcmp(rec1, rec2, rec_get_data_size_old(rec2)));
- /* NOTE that the old lock bitmap could be too
- small for the new heap number! */
+ rec1= page_rec_get_next_low(rec1, FALSE);
+ rec2= page_rec_get_next_low(rec2, FALSE);
+ }
- lock_rec_add_to_queue(
- lock->type_mode, id, block->frame,
- new_heap_no,
- lock->index, lock->trx, FALSE);
- }
+ /* Clear the bit in old_lock. */
+ if (old_heap_no < lock->un_member.rec_lock.n_bits &&
+ lock_rec_reset_nth_bit(lock, old_heap_no))
+ {
+ ut_ad(!page_rec_is_metadata(orec));
- if (new_heap_no == PAGE_HEAP_NO_SUPREMUM) {
- ut_ad(old_heap_no == PAGE_HEAP_NO_SUPREMUM);
- break;
- }
- }
+ /* NOTE that the old lock bitmap could be too
+ small for the new heap number! */
+ lock_rec_add_to_queue(lock->type_mode, id, block->frame, new_heap_no,
+ lock->index, lock->trx, FALSE);
+ }
- ut_ad(lock_rec_find_set_bit(lock) == ULINT_UNDEFINED);
- }
+ if (new_heap_no == PAGE_HEAP_NO_SUPREMUM)
+ {
+ ut_ad(old_heap_no == PAGE_HEAP_NO_SUPREMUM);
+ break;
+ }
+ }
- lock_sys.mutex_unlock();
+ ut_ad(lock_rec_find_set_bit(lock) == ULINT_UNDEFINED);
+ }
+ }
- mem_heap_free(heap);
+ mem_heap_free(heap);
#ifdef UNIV_DEBUG_LOCK_VALIDATE
- if (fil_space_t* space = fil_space_t::get(page_id.space())) {
- ut_ad(lock_rec_validate_page(block, space->is_latched()));
- space->release();
- }
+ if (fil_space_t *space= fil_space_t::get(id.space()))
+ {
+ ut_ad(lock_rec_validate_page(block, space->is_latched()));
+ space->release();
+ }
#endif
}
@@ -2864,8 +2850,7 @@ lock_update_discard(
ulint heap_no;
const page_id_t heir(heir_block->page.id());
const page_id_t page_id(block->page.id());
-
- LockMutexGuard g;
+ LockMutexGuard g;
if (lock_sys.get_first(page_id)) {
ut_ad(!lock_sys.get_first_prdt(page_id));
@@ -3443,7 +3428,7 @@ lock_table(
err = DB_SUCCESS;
- lock_sys.mutex_lock();
+ LockMutexGuard g;
/* We have to check if the new lock is compatible with any locks
other transactions have in the table lock queue. */
@@ -3463,8 +3448,6 @@ lock_table(
lock_table_create(table, mode, trx);
}
- lock_sys.mutex_unlock();
-
trx->mutex_unlock();
return(err);
@@ -3681,7 +3664,7 @@ lock_rec_unlock(
heap_no = page_rec_get_heap_no(rec);
- lock_sys.mutex_lock();
+ LockGuard g{id};
first_lock = lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
@@ -3695,8 +3678,6 @@ lock_rec_unlock(
}
}
- lock_sys.mutex_unlock();
-
{
ib::error err;
err << "Unlock row could not find a " << lock_mode
@@ -3734,8 +3715,6 @@ released:
#endif /* WITH_WSREP */
}
}
-
- lock_sys.mutex_unlock();
}
#ifdef UNIV_DEBUG
@@ -4190,14 +4169,12 @@ lock_print_info_all_transactions(
/*=============================*/
FILE* file) /*!< in/out: file where to print */
{
- lock_sys.mutex_assert_locked();
-
fprintf(file, "LIST OF TRANSACTIONS FOR EACH SESSION:\n");
trx_sys.trx_list.for_each(lock_print_info(file, my_hrtime_coarse()));
lock_sys.mutex_unlock();
- ut_ad(lock_validate());
+ ut_d(lock_validate());
}
#ifdef UNIV_DEBUG
@@ -4464,7 +4441,7 @@ static bool lock_rec_validate_page(const buf_block_t *block, bool latched)
const page_id_t id{block->page.id()};
- LockMutexGuard g;
+ LockGuard g{id};
loop:
lock = lock_sys.get_first(id);
@@ -4622,44 +4599,30 @@ static my_bool lock_validate_table_locks(rw_trx_hash_element_t *element, void*)
}
-/*********************************************************************//**
-Validates the lock system.
-@return TRUE if ok */
-static
-bool
-lock_validate()
-/*===========*/
+/** Validate the transactional locks. */
+static void lock_validate()
{
- std::set<page_id_t> pages;
-
- lock_sys.mutex_lock();
-
- /* Validate table locks */
- trx_sys.rw_trx_hash.iterate(lock_validate_table_locks);
-
- /* Iterate over all the record locks and validate the locks. We
- don't want to hog the lock_sys_t::mutex. Release it during the
- validation check. */
-
- for (ulint i = 0; i < lock_sys.rec_hash.n_cells; i++) {
- page_id_t limit(0, 0);
-
- while (const lock_t* lock = lock_rec_validate(i, &limit)) {
- if (lock_rec_find_set_bit(lock) == ULINT_UNDEFINED) {
- /* The lock bitmap is empty; ignore it. */
- continue;
- }
- pages.insert(lock->un_member.rec_lock.page_id);
- }
- }
-
- lock_sys.mutex_unlock();
+ std::set<page_id_t> pages;
+ {
+ LockMutexGuard g;
+ /* Validate table locks */
+ trx_sys.rw_trx_hash.iterate(lock_validate_table_locks);
- for (page_id_t page_id : pages) {
- lock_rec_block_validate(page_id);
- }
+ for (ulint i= 0; i < lock_sys.rec_hash.n_cells; i++)
+ {
+ page_id_t limit{0, 0};
+ while (const lock_t *lock= lock_rec_validate(i, &limit))
+ {
+ if (lock_rec_find_set_bit(lock) == ULINT_UNDEFINED)
+ /* The lock bitmap is empty; ignore it. */
+ continue;
+ pages.insert(lock->un_member.rec_lock.page_id);
+ }
+ }
+ }
- return(true);
+ for (page_id_t page_id : pages)
+ lock_rec_block_validate(page_id);
}
#endif /* UNIV_DEBUG */
/*============ RECORD LOCK CHECKS FOR ROW OPERATIONS ====================*/
@@ -4684,129 +4647,97 @@ lock_rec_insert_check_and_lock(
LOCK_GAP type locks from the successor
record */
{
- ut_ad(block->frame == page_align(rec));
- ut_ad(mtr->is_named_space(index->table->space));
- ut_ad(page_rec_is_leaf(rec));
-
- ut_ad(!index->table->is_temporary());
- ut_ad(page_is_leaf(block->frame));
-
- dberr_t err;
- lock_t* lock;
- bool inherit_in = *inherit;
- trx_t* trx = thr_get_trx(thr);
- const rec_t* next_rec = page_rec_get_next_const(rec);
- ulint heap_no = page_rec_get_heap_no(next_rec);
- ut_ad(!rec_is_metadata(next_rec, *index));
-
- const page_id_t id{block->page.id()};
-
- lock_sys.mutex_lock();
- /* Because this code is invoked for a running transaction by
- the thread that is serving the transaction, it is not necessary
- to hold trx->mutex here. */
-
- /* When inserting a record into an index, the table must be at
- least IX-locked. When we are building an index, we would pass
- BTR_NO_LOCKING_FLAG and skip the locking altogether. */
- ut_ad(lock_table_has(trx, index->table, LOCK_IX));
-
- lock = lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
-
- if (lock == NULL) {
- /* We optimize CPU time usage in the simplest case */
-
- lock_sys.mutex_unlock();
-
- if (inherit_in && !dict_index_is_clust(index)) {
- /* Update the page max trx id field */
- page_update_max_trx_id(block,
- buf_block_get_page_zip(block),
- trx->id, mtr);
- }
-
- *inherit = false;
-
- return(DB_SUCCESS);
- }
-
- /* Spatial index does not use GAP lock protection. It uses
- "predicate lock" to protect the "range" */
- if (dict_index_is_spatial(index)) {
- return(DB_SUCCESS);
- }
-
- *inherit = true;
+ ut_ad(block->frame == page_align(rec));
+ ut_ad(mtr->is_named_space(index->table->space));
+ ut_ad(page_is_leaf(block->frame));
+ ut_ad(!index->table->is_temporary());
- /* If another transaction has an explicit lock request which locks
- the gap, waiting or granted, on the successor, the insert has to wait.
+ dberr_t err= DB_SUCCESS;
+ bool inherit_in= *inherit;
+ trx_t *trx= thr_get_trx(thr);
+ const rec_t *next_rec= page_rec_get_next_const(rec);
+ ulint heap_no= page_rec_get_heap_no(next_rec);
+ const page_id_t id{block->page.id()};
+ ut_ad(!rec_is_metadata(next_rec, *index));
- An exception is the case where the lock by the another transaction
- is a gap type lock which it placed to wait for its turn to insert. We
- do not consider that kind of a lock conflicting with our insert. This
- eliminates an unnecessary deadlock which resulted when 2 transactions
- had to wait for their insert. Both had waiting gap type lock requests
- on the successor, which produced an unnecessary deadlock. */
+ {
+ LockGuard g{id};
+ /* Because this code is invoked for a running transaction by
+ the thread that is serving the transaction, it is not necessary
+ to hold trx->mutex here. */
- const unsigned type_mode = LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION;
+ /* When inserting a record into an index, the table must be at
+ least IX-locked. When we are building an index, we would pass
+ BTR_NO_LOCKING_FLAG and skip the locking altogether. */
+ ut_ad(lock_table_has(trx, index->table, LOCK_IX));
- if (
-#ifdef WITH_WSREP
- lock_t* c_lock =
-#endif /* WITH_WSREP */
- lock_rec_other_has_conflicting(type_mode, id, heap_no, trx)) {
- trx->mutex_lock();
+ *inherit= lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
- err = lock_rec_enqueue_waiting(
+ if (*inherit)
+ {
+ /* Spatial index does not use GAP lock protection. It uses
+ "predicate lock" to protect the "range" */
+ if (index->is_spatial())
+ return DB_SUCCESS;
+
+ /* If another transaction has an explicit lock request which locks
+ the gap, waiting or granted, on the successor, the insert has to wait.
+
+ An exception is the case where the lock by the another transaction
+ is a gap type lock which it placed to wait for its turn to insert. We
+ do not consider that kind of a lock conflicting with our insert. This
+ eliminates an unnecessary deadlock which resulted when 2 transactions
+ had to wait for their insert. Both had waiting gap type lock requests
+ on the successor, which produced an unnecessary deadlock. */
+ const unsigned type_mode= LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION;
+
+ lock_t *c_lock= lock_rec_other_has_conflicting(type_mode, id,
+ heap_no, trx);
+ if (c_lock)
+ {
+ trx->mutex_lock();
+ err= lock_rec_enqueue_waiting(
#ifdef WITH_WSREP
- c_lock,
-#endif /* WITH_WSREP */
- type_mode, id, block->frame, heap_no, index,
- thr, nullptr);
-
- trx->mutex_unlock();
- } else {
- err = DB_SUCCESS;
- }
-
- lock_sys.mutex_unlock();
-
- switch (err) {
- case DB_SUCCESS_LOCKED_REC:
- err = DB_SUCCESS;
- /* fall through */
- case DB_SUCCESS:
- if (!inherit_in || dict_index_is_clust(index)) {
- break;
- }
+ c_lock,
+#endif
+ type_mode, id, block->frame, heap_no, index, thr, nullptr);
+ trx->mutex_unlock();
+ }
+ }
+ }
- /* Update the page max trx id field */
- page_update_max_trx_id(
- block, buf_block_get_page_zip(block), trx->id, mtr);
- default:
- /* We only care about the two return values. */
- break;
- }
+ switch (err) {
+ case DB_SUCCESS_LOCKED_REC:
+ err = DB_SUCCESS;
+ /* fall through */
+ case DB_SUCCESS:
+ if (!inherit_in || index->is_clust())
+ break;
+ /* Update the page max trx id field */
+ page_update_max_trx_id(block, buf_block_get_page_zip(block), trx->id, mtr);
+ default:
+ /* We only care about the two return values. */
+ break;
+ }
#ifdef UNIV_DEBUG
- {
- mem_heap_t* heap = NULL;
- rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
- const rec_offs* offsets;
- rec_offs_init(offsets_);
+ {
+ mem_heap_t *heap= nullptr;
+ rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
+ const rec_offs *offsets;
+ rec_offs_init(offsets_);
- offsets = rec_get_offsets(next_rec, index, offsets_, true,
- ULINT_UNDEFINED, &heap);
+ offsets= rec_get_offsets(next_rec, index, offsets_, true,
+ ULINT_UNDEFINED, &heap);
- ut_ad(lock_rec_queue_validate(false, id, next_rec, index, offsets));
+ ut_ad(lock_rec_queue_validate(false, id, next_rec, index, offsets));
- if (heap != NULL) {
- mem_heap_free(heap);
- }
- }
+ if (UNIV_LIKELY_NULL(heap))
+ mem_heap_free(heap);
+ }
#endif /* UNIV_DEBUG */
- return(err);
+ return err;
}
/*********************************************************************//**
@@ -5514,25 +5445,6 @@ lock_trx_handle_wait(
return err;
}
-/*********************************************************************//**
-Get the number of locks on a table.
-@return number of locks */
-ulint
-lock_table_get_n_locks(
-/*===================*/
- const dict_table_t* table) /*!< in: table */
-{
- ulint n_table_locks;
-
- lock_sys.mutex_lock();
-
- n_table_locks = UT_LIST_GET_LEN(table->locks);
-
- lock_sys.mutex_unlock();
-
- return(n_table_locks);
-}
-
#ifdef UNIV_DEBUG
/**
Do an exhaustive check for any locks (table or rec) against the table.
@@ -5584,22 +5496,13 @@ lock_table_has_locks(
held on records in this table or on the
table itself */
{
- ibool has_locks;
-
- ut_ad(table != NULL);
- lock_sys.mutex_lock();
-
- has_locks = UT_LIST_GET_LEN(table->locks) > 0 || table->n_rec_locks > 0;
-
+ LockMutexGuard g;
+ bool has_locks= UT_LIST_GET_LEN(table->locks) > 0 || table->n_rec_locks > 0;
#ifdef UNIV_DEBUG
- if (!has_locks) {
- trx_sys.rw_trx_hash.iterate(lock_table_locks_lookup, table);
- }
+ if (!has_locks)
+ trx_sys.rw_trx_hash.iterate(lock_table_locks_lookup, table);
#endif /* UNIV_DEBUG */
-
- lock_sys.mutex_unlock();
-
- return(has_locks);
+ return has_locks;
}
/*******************************************************************//**
@@ -5636,7 +5539,7 @@ lock_trx_has_sys_table_locks(
const lock_t* strongest_lock = 0;
lock_mode strongest = LOCK_NONE;
- lock_sys.mutex_lock();
+ LockMutexGuard g;
const lock_list::const_iterator end = trx->lock.table_locks.end();
lock_list::const_iterator it = trx->lock.table_locks.begin();
@@ -5657,7 +5560,6 @@ lock_trx_has_sys_table_locks(
}
if (strongest == LOCK_NONE) {
- lock_sys.mutex_unlock();
return(NULL);
}
@@ -5682,8 +5584,6 @@ lock_trx_has_sys_table_locks(
}
}
- lock_sys.mutex_unlock();
-
return(strongest_lock);
}
@@ -5697,10 +5597,12 @@ bool lock_trx_has_expl_x_lock(const trx_t &trx, const dict_table_t &table,
page_id_t id, ulint heap_no)
{
ut_ad(heap_no > PAGE_HEAP_NO_SUPREMUM);
- LockMutexGuard g;
ut_ad(lock_table_has(&trx, &table, LOCK_IX));
- ut_ad(lock_table_has(&trx, &table, LOCK_X) ||
- lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, id, heap_no, &trx));
+ if (!lock_table_has(&trx, &table, LOCK_X))
+ {
+ LockGuard g{id};
+ ut_ad(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, id, heap_no, &trx));
+ }
return true;
}
#endif /* UNIV_DEBUG */
diff --git a/storage/innobase/lock/lock0prdt.cc b/storage/innobase/lock/lock0prdt.cc
index b2f71c48a46..ad79cfa37e2 100644
--- a/storage/innobase/lock/lock0prdt.cc
+++ b/storage/innobase/lock/lock0prdt.cc
@@ -234,16 +234,13 @@ lock_prdt_has_lock(
attached to the new lock */
const trx_t* trx) /*!< in: transaction */
{
- lock_t* lock;
-
- lock_sys.mutex_assert_locked();
+ lock_sys.assert_locked(id);
ut_ad((precise_mode & LOCK_MODE_MASK) == LOCK_S
|| (precise_mode & LOCK_MODE_MASK) == LOCK_X);
ut_ad(!(precise_mode & LOCK_INSERT_INTENTION));
- for (lock = lock_rec_get_first(
- lock_hash_get(type_mode), id, PRDT_HEAPNO);
- lock != NULL;
+ for (lock_t* lock = lock_rec_get_first(lock_hash_get(type_mode), id,
+ PRDT_HEAPNO); lock;
lock = lock_rec_get_next(PRDT_HEAPNO, lock)) {
ut_ad(lock->type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE));
@@ -291,10 +288,8 @@ lock_prdt_other_has_conflicting(
the new lock will be on */
const trx_t* trx) /*!< in: our transaction */
{
- lock_sys.mutex_assert_locked();
-
- for (lock_t* lock = lock_rec_get_first(
- lock_hash_get(mode), id, PRDT_HEAPNO);
+ for (lock_t* lock = lock_rec_get_first(lock_hash_get(mode), id,
+ PRDT_HEAPNO);
lock != NULL;
lock = lock_rec_get_next(PRDT_HEAPNO, lock)) {
@@ -388,8 +383,6 @@ lock_prdt_find_on_page(
{
lock_t* lock;
- lock_sys.mutex_assert_locked();
-
for (lock = lock_sys.get_first(*lock_hash_get(type_mode),
block->page.id());
lock != NULL;
@@ -433,7 +426,7 @@ lock_prdt_add_to_queue(
transaction mutex */
{
const page_id_t id{block->page.id()};
- lock_sys.mutex_assert_locked();
+ lock_sys.assert_locked(id);
ut_ad(caller_owns_trx_mutex == trx->mutex_is_owner());
ut_ad(index->is_spatial());
ut_ad(!dict_index_is_online_ddl(index));
@@ -502,83 +495,58 @@ lock_prdt_insert_check_and_lock(
lock_prdt_t* prdt) /*!< in: Predicates with Minimum Bound
Rectangle */
{
- ut_ad(block->frame == page_align(rec));
- ut_ad(!index->table->is_temporary());
- ut_ad(index->is_spatial());
-
- trx_t* trx = thr_get_trx(thr);
- const page_id_t id{block->page.id()};
-
- lock_sys.mutex_lock();
-
- /* Because this code is invoked for a running transaction by
- the thread that is serving the transaction, it is not necessary
- to hold trx->mutex here. */
-
- ut_ad(lock_table_has(trx, index->table, LOCK_IX));
-
- lock_t* lock;
-
- /* Only need to check locks on prdt_hash */
- lock = lock_rec_get_first(&lock_sys.prdt_hash, id, PRDT_HEAPNO);
-
- if (lock == NULL) {
- lock_sys.mutex_unlock();
-
- /* Update the page max trx id field */
- page_update_max_trx_id(block, buf_block_get_page_zip(block),
- trx->id, mtr);
-
- return(DB_SUCCESS);
- }
-
- ut_ad(lock->type_mode & LOCK_PREDICATE);
-
- dberr_t err;
-
- /* If another transaction has an explicit lock request which locks
- the predicate, waiting or granted, on the successor, the insert
- has to wait.
-
- Similar to GAP lock, we do not consider lock from inserts conflicts
- with each other */
-
- const ulint mode = LOCK_X | LOCK_PREDICATE | LOCK_INSERT_INTENTION;
-
- const lock_t* wait_for = lock_prdt_other_has_conflicting(
- mode, id, prdt, trx);
-
- if (wait_for != NULL) {
- rtr_mbr_t* mbr = prdt_get_mbr_from_prdt(prdt);
-
- /* Allocate MBR on the lock heap */
- lock_init_prdt_from_mbr(prdt, mbr, 0, trx->lock.lock_heap);
-
- /* Note that we may get DB_SUCCESS also here! */
- trx->mutex_lock();
-
- err = lock_rec_enqueue_waiting(
+ ut_ad(block->frame == page_align(rec));
+ ut_ad(!index->table->is_temporary());
+ ut_ad(index->is_spatial());
+
+ trx_t *trx= thr_get_trx(thr);
+ const page_id_t id{block->page.id()};
+ dberr_t err= DB_SUCCESS;
+
+ {
+ LockGuard g{id};
+ /* Because this code is invoked for a running transaction by
+ the thread that is serving the transaction, it is not necessary
+ to hold trx->mutex here. */
+ ut_ad(lock_table_has(trx, index->table, LOCK_IX));
+
+ /* Only need to check locks on prdt_hash */
+ if (ut_d(lock_t *lock=)
+ lock_rec_get_first(&lock_sys.prdt_hash, id, PRDT_HEAPNO))
+ {
+ ut_ad(lock->type_mode & LOCK_PREDICATE);
+
+ /* If another transaction has an explicit lock request which locks
+ the predicate, waiting or granted, on the successor, the insert
+ has to wait.
+
+ Similar to GAP lock, we do not consider lock from inserts conflicts
+ with each other */
+
+ const ulint mode= LOCK_X | LOCK_PREDICATE | LOCK_INSERT_INTENTION;
+ lock_t *c_lock= lock_prdt_other_has_conflicting(mode, id, prdt, trx);
+
+ if (c_lock)
+ {
+ rtr_mbr_t *mbr= prdt_get_mbr_from_prdt(prdt);
+ /* Allocate MBR on the lock heap */
+ lock_init_prdt_from_mbr(prdt, mbr, 0, trx->lock.lock_heap);
+ trx->mutex_lock();
+ err= lock_rec_enqueue_waiting(
#ifdef WITH_WSREP
- NULL, /* FIXME: replicate SPATIAL INDEX locks */
+ c_lock,
#endif
- LOCK_X | LOCK_PREDICATE | LOCK_INSERT_INTENTION,
- id, block->frame, PRDT_HEAPNO, index, thr, prdt);
-
- trx->mutex_unlock();
- } else {
- err = DB_SUCCESS;
- }
+ mode, id, block->frame, PRDT_HEAPNO, index, thr, prdt);
+ trx->mutex_unlock();
+ }
+ }
+ }
- lock_sys.mutex_unlock();
+ if (err == DB_SUCCESS)
+ /* Update the page max trx id field */
+ page_update_max_trx_id(block, buf_block_get_page_zip(block), trx->id, mtr);
- if (err == DB_SUCCESS) {
- /* Update the page max trx id field */
- page_update_max_trx_id(block,
- buf_block_get_page_zip(block),
- trx->id, mtr);
- }
-
- return(err);
+ return err;
}
/**************************************************************//**
@@ -593,7 +561,7 @@ lock_prdt_update_parent(
lock_prdt_t* right_prdt, /*!< in: MBR on the new page */
const page_id_t page_id) /*!< in: parent page */
{
- lock_sys.mutex_lock();
+ LockMutexGuard g;
/* Get all locks in parent */
for (lock_t *lock = lock_sys.get_first_prdt(page_id);
@@ -630,8 +598,6 @@ lock_prdt_update_parent(
lock_prdt, false);
}
}
-
- lock_sys.mutex_unlock();
}
/**************************************************************//**
@@ -694,15 +660,13 @@ lock_prdt_update_split(
lock_prdt_t* new_prdt, /*!< in: MBR on the new page */
const page_id_t page_id) /*!< in: page number */
{
- lock_sys.mutex_lock();
+ LockMutexGuard g;
lock_prdt_update_split_low(new_block, prdt, new_prdt,
page_id, LOCK_PREDICATE);
lock_prdt_update_split_low(new_block, NULL, NULL,
page_id, LOCK_PRDT_PAGE);
-
- lock_sys.mutex_unlock();
}
/*********************************************************************//**
@@ -768,7 +732,7 @@ lock_prdt_lock(
index record, and this would not have been possible if another active
transaction had modified this secondary index record. */
- lock_sys.mutex_lock();
+ LockGuard g{id};
const unsigned prdt_mode = type_mode | mode;
lock_t* lock = lock_sys.get_first(hash, id);
@@ -831,8 +795,6 @@ lock_prdt_lock(
}
}
- lock_sys.mutex_unlock();
-
if (status == LOCK_REC_SUCCESS_CREATED && type_mode == LOCK_PREDICATE) {
/* Append the predicate in the lock record */
lock_prdt_set_prdt(lock, prdt);
@@ -861,7 +823,7 @@ lock_place_prdt_page_lock(
index record, and this would not have been possible if another active
transaction had modified this secondary index record. */
- lock_sys.mutex_lock();
+ LockGuard g{page_id};
const lock_t* lock = lock_sys.get_first_prdt_page(page_id);
const ulint mode = LOCK_S | LOCK_PRDT_PAGE;
@@ -891,8 +853,6 @@ lock_place_prdt_page_lock(
#endif /* PRDT_DIAG */
}
- lock_sys.mutex_unlock();
-
return(DB_SUCCESS);
}
@@ -902,15 +862,9 @@ lock_place_prdt_page_lock(
@return true if there is none */
bool lock_test_prdt_page_lock(const trx_t *trx, const page_id_t page_id)
{
- lock_t* lock;
-
- lock_sys.mutex_lock();
-
- lock = lock_sys.get_first_prdt_page(page_id);
-
- lock_sys.mutex_unlock();
-
- return(!lock || trx == lock->trx);
+ LockGuard g{page_id};
+ lock_t *lock= lock_sys.get_first_prdt_page(page_id);
+ return !lock || trx == lock->trx;
}
/*************************************************************//**
@@ -923,7 +877,7 @@ lock_prdt_rec_move(
the receiving record */
const page_id_t donator) /*!< in: target page */
{
- lock_sys.mutex_lock();
+ LockMutexGuard g;
for (lock_t *lock = lock_rec_get_first(&lock_sys.prdt_hash,
donator, PRDT_HEAPNO);
@@ -942,8 +896,6 @@ lock_prdt_rec_move(
type_mode, receiver, lock->index, lock->trx,
lock_prdt, false);
}
-
- lock_sys.mutex_unlock();
}
/** Removes predicate lock objects set on an index page which is discarded.
@@ -952,18 +904,12 @@ lock_prdt_rec_move(
void
lock_prdt_page_free_from_discard(const page_id_t id, hash_table_t *lock_hash)
{
- lock_t* lock;
- lock_t* next_lock;
-
- lock_sys.mutex_assert_locked();
-
- lock = lock_sys.get_first(*lock_hash, id);
-
- while (lock != NULL) {
- next_lock = lock_rec_get_next_on_page(lock);
-
- lock_rec_discard(lock);
-
- lock = next_lock;
- }
+ lock_sys.assert_locked(id);
+
+ for (lock_t *lock= lock_sys.get_first(*lock_hash, id), *next; lock;
+ lock= next)
+ {
+ next= lock_rec_get_next_on_page(lock);
+ lock_rec_discard(lock);
+ }
}
diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc
index f1b1bbeaf6c..2ba367c1752 100644
--- a/storage/innobase/row/row0ins.cc
+++ b/storage/innobase/row/row0ins.cc
@@ -707,11 +707,12 @@ row_ins_foreign_trx_print(
ut_ad(!srv_read_only_mode);
- lock_sys.mutex_lock();
- n_rec_locks = trx->lock.n_rec_locks;
- n_trx_locks = UT_LIST_GET_LEN(trx->lock.trx_locks);
- heap_size = mem_heap_get_size(trx->lock.lock_heap);
- lock_sys.mutex_unlock();
+ {
+ LockMutexGuard g;
+ n_rec_locks = trx->lock.n_rec_locks;
+ n_trx_locks = UT_LIST_GET_LEN(trx->lock.trx_locks);
+ heap_size = mem_heap_get_size(trx->lock.lock_heap);
+ }
mysql_mutex_lock(&dict_foreign_err_mutex);
rewind(dict_foreign_err_file);
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index 2f3332ecc97..13bcc2a0a09 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -2613,9 +2613,10 @@ skip:
}
if (!srv_fast_shutdown && !trx_sys.any_active_transactions()) {
- lock_sys.mutex_lock();
- skip = UT_LIST_GET_LEN(table->locks) != 0;
- lock_sys.mutex_unlock();
+ {
+ LockMutexGuard g;
+ skip = UT_LIST_GET_LEN(table->locks) != 0;
+ }
if (skip) {
/* We cannot drop tables that are locked by XA
PREPARE transactions. */
diff --git a/storage/innobase/trx/trx0i_s.cc b/storage/innobase/trx/trx0i_s.cc
index 25afa768f3b..4a73b02df0c 100644
--- a/storage/innobase/trx/trx0i_s.cc
+++ b/storage/innobase/trx/trx0i_s.cc
@@ -1181,7 +1181,7 @@ static void fetch_data_into_cache_low(trx_i_s_cache_t *cache, const trx_t *trx)
static void fetch_data_into_cache(trx_i_s_cache_t *cache)
{
- lock_sys.mutex_assert_locked();
+ LockMutexGuard g;
trx_i_s_cache_clear(cache);
/* Capture the state of transactions */
@@ -1211,10 +1211,7 @@ trx_i_s_possibly_fetch_data_into_cache(
}
/* We need to read trx_sys and record/table lock queues */
-
- lock_sys.mutex_lock();
fetch_data_into_cache(cache);
- lock_sys.mutex_unlock();
/* update cache last read time */
cache->last_read = my_interval_timer();
diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc
index 77b87c84a20..5ec784bfc25 100644
--- a/storage/innobase/trx/trx0trx.cc
+++ b/storage/innobase/trx/trx0trx.cc
@@ -1258,12 +1258,12 @@ trx_update_mod_tables_timestamp(
/* recheck while holding the mutex that blocks
table->acquire() */
dict_sys.mutex_lock();
- lock_sys.mutex_lock();
- const bool do_evict = !table->get_ref_count()
- && !UT_LIST_GET_LEN(table->locks);
- lock_sys.mutex_unlock();
- if (do_evict) {
- dict_sys.remove(table, true);
+ {
+ LockMutexGuard g;
+ if (!table->get_ref_count()
+ && !UT_LIST_GET_LEN(table->locks)) {
+ dict_sys.remove(table, true);
+ }
}
dict_sys.mutex_unlock();
#endif
@@ -1863,18 +1863,15 @@ trx_print(
ulint max_query_len) /*!< in: max query length to print,
or 0 to use the default max length */
{
- ulint n_rec_locks;
- ulint n_trx_locks;
- ulint heap_size;
-
- lock_sys.mutex_lock();
- n_rec_locks = trx->lock.n_rec_locks;
- n_trx_locks = UT_LIST_GET_LEN(trx->lock.trx_locks);
- heap_size = mem_heap_get_size(trx->lock.lock_heap);
- lock_sys.mutex_unlock();
+ ulint n_rec_locks, n_trx_locks, heap_size;
+ {
+ LockMutexGuard g;
+ n_rec_locks= trx->lock.n_rec_locks;
+ n_trx_locks= UT_LIST_GET_LEN(trx->lock.trx_locks);
+ heap_size= mem_heap_get_size(trx->lock.lock_heap);
+ }
- trx_print_low(f, trx, max_query_len,
- n_rec_locks, n_trx_locks, heap_size);
+ trx_print_low(f, trx, max_query_len, n_rec_locks, n_trx_locks, heap_size);
}
/** Prepare a transaction.