author | Sergey Vojtovich <svoj@mariadb.org> | 2018-02-22 20:46:42 +0400
---|---|---
committer | Marko Mäkelä <marko.makela@mariadb.com> | 2018-02-23 04:46:39 +0200
commit | 916226669586b208e6eb6921ab22f499f9b55033 (patch) |
tree | 2bb390efc28ccfd04ef66895534e9d518e9f8769 |
parent | 2937d063b9ed84f848f899251e938c779e500919 (diff) |
download | mariadb-git-bb-10.3-lock_sys.tar.gz |
Allocate lock_sys statically (branch bb-10.3-lock_sys)
There is only one lock_sys. Allocate it statically so that accessing it
no longer requires dereferencing a pointer. Also, align some members to
their own cache lines to avoid false sharing.
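To make the false-sharing point concrete: two independent mutexes that happen to share a cache line will bounce that line between cores even when their critical sections never overlap. A minimal, self-contained sketch of the cure follows; it assumes a 64-byte cache line and uses plain `alignas` and `std::mutex`, whereas the patch itself uses `MY_ALIGNED(CACHE_LINE_SIZE)` and InnoDB's `LockMutex`:

```cpp
#include <mutex>

// Without explicit alignment the two mutexes may land in the same
// 64-byte cache line, so threads contending on one also invalidate
// the line holding the other (false sharing).
struct lock_sys_unaligned {
	std::mutex mutex;       // protects the record-lock hashes
	std::mutex wait_mutex;  // protects the wait queue
};

// With each hot member forced onto its own cache line, contention on
// one mutex no longer evicts the line used by the other.
struct lock_sys_aligned {
	alignas(64) std::mutex mutex;
	alignas(64) std::mutex wait_mutex;
};

static_assert(alignof(lock_sys_aligned) == 64,
	      "each aligned member starts a new 64-byte cache line");
```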
lock_sys_t::create(): The deferred constructor.
lock_sys_t::close(): The early destructor.
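Taken together, the new class has roughly the shape below. This is a condensed, illustrative sketch rather than a copy of the patch: `LockMutex`, `hash_table_t` and `srv_slot_t` are InnoDB types, so standard-library placeholders stand in for them here.

```cpp
#include <cassert>
#include <cstddef>
#include <mutex>
#include <unordered_map>

class lock_sys_t {
	bool m_initialised = false;  // flipped by create()/close(), not by the constructor

public:
	// Hot mutexes are aligned to their own cache lines (MY_ALIGNED in the patch).
	alignas(64) std::mutex mutex;                   // protects the lock hashes
	std::unordered_map<std::size_t, int> rec_hash;  // stand-in for hash_table_t*
	alignas(64) std::mutex wait_mutex;              // protects the wait slots

	// The real constructor does almost nothing, which is what makes a static
	// instance safe before the rest of InnoDB is up.
	lock_sys_t() = default;

	bool is_initialised() const { return m_initialised; }

	// Deferred constructor, called once at database start.
	void create(std::size_t n_cells) {
		assert(!m_initialised);
		rec_hash.reserve(n_cells);
		m_initialised = true;
	}

	// Early destructor, called at shutdown; a no-op if create() never ran,
	// so shutdown paths may call it unconditionally.
	void close() {
		if (!m_initialised) return;
		rec_hash.clear();
		m_initialised = false;
	}
};

// Statically allocated: every former lock_sys->x access becomes lock_sys.x.
lock_sys_t lock_sys;
```

The `if (!m_initialised) return;` guard in close() is what lets innodb_shutdown() in the diff below drop its former `if (lock_sys)` check and call lock_sys.close() unconditionally.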
-rw-r--r-- | extra/mariabackup/xtrabackup.cc | 2
-rw-r--r-- | storage/innobase/btr/btr0btr.cc | 6
-rw-r--r-- | storage/innobase/buf/buf0buf.cc | 2
-rw-r--r-- | storage/innobase/gis/gis0sea.cc | 4
-rw-r--r-- | storage/innobase/include/dict0mem.h | 6
-rw-r--r-- | storage/innobase/include/lock0lock.h | 100
-rw-r--r-- | storage/innobase/include/lock0lock.ic | 8
-rw-r--r-- | storage/innobase/include/lock0priv.h | 2
-rw-r--r-- | storage/innobase/include/lock0types.h | 1
-rw-r--r-- | storage/innobase/include/trx0sys.h | 4
-rw-r--r-- | storage/innobase/include/trx0trx.h | 36
-rw-r--r-- | storage/innobase/lock/lock0lock.cc | 212
-rw-r--r-- | storage/innobase/lock/lock0prdt.cc | 16
-rw-r--r-- | storage/innobase/lock/lock0wait.cc | 44
-rw-r--r-- | storage/innobase/log/log0log.cc | 8
-rw-r--r-- | storage/innobase/srv/srv0mon.cc | 2
-rw-r--r-- | storage/innobase/srv/srv0srv.cc | 4
-rw-r--r-- | storage/innobase/srv/srv0start.cc | 13
-rw-r--r-- | storage/innobase/sync/sync0debug.cc | 2
-rw-r--r-- | storage/innobase/trx/trx0i_s.cc | 8
-rw-r--r-- | storage/innobase/trx/trx0trx.cc | 8
21 files changed, 248 insertions, 240 deletions
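Most of those changes are the mechanical conversion of call sites from pointer access to direct member access. Schematically (the `before`/`after` namespaces and the `int*` member below are placeholders for illustration, not code from the patch):

```cpp
// Shape of the repeated call-site change in the diff below.
struct lock_sys_t {
	int*	rec_hash;	// placeholder for hash_table_t*
};

namespace before {
	lock_sys_t*	lock_sys;	// was allocated with ut_zalloc_nokey() at startup
	inline int*	rec_hash()	{ return lock_sys->rec_hash; }	// extra dereference
}

namespace after {
	lock_sys_t	lock_sys;	// statically allocated object
	inline int*	rec_hash()	{ return lock_sys.rec_hash; }	// direct member access
}
```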
diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index 5c93a47b626..444e932ca1a 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -3672,8 +3672,6 @@ fail: "innodb_redo_log", SRV_LOG_SPACE_FIRST_ID, 0, FIL_TYPE_LOG, NULL); - lock_sys_create(srv_lock_table_size); - for (i = 0; i < srv_n_log_files; i++) { err = open_or_create_log_file(space, &log_file_created, i); if (err != DB_SUCCESS) { diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc index 14eb37eaef6..5bd6ffb77f3 100644 --- a/storage/innobase/btr/btr0btr.cc +++ b/storage/innobase/btr/btr0btr.cc @@ -3675,7 +3675,7 @@ btr_lift_page_up( if (dict_index_is_spatial(index)) { lock_mutex_enter(); lock_prdt_page_free_from_discard( - block, lock_sys->prdt_page_hash); + block, lock_sys.prdt_page_hash); lock_mutex_exit(); } lock_update_copy_and_discard(father_block, block); @@ -3968,7 +3968,7 @@ retry: /* No GAP lock needs to be worrying about */ lock_mutex_enter(); lock_prdt_page_free_from_discard( - block, lock_sys->prdt_page_hash); + block, lock_sys.prdt_page_hash); lock_rec_free_all_from_discard_page(block); lock_mutex_exit(); } else { @@ -4126,7 +4126,7 @@ retry: } lock_mutex_enter(); lock_prdt_page_free_from_discard( - block, lock_sys->prdt_page_hash); + block, lock_sys.prdt_page_hash); lock_rec_free_all_from_discard_page(block); lock_mutex_exit(); } else { diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 1c795f6405f..437e3390b86 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -3058,7 +3058,7 @@ calc_buf_pool_size: /* normalize lock_sys */ srv_lock_table_size = 5 * (srv_buf_pool_size / UNIV_PAGE_SIZE); - lock_sys_resize(srv_lock_table_size); + lock_sys.resize(srv_lock_table_size); /* normalize btr_search_sys */ btr_search_sys_resize( diff --git a/storage/innobase/gis/gis0sea.cc b/storage/innobase/gis/gis0sea.cc index 4912471f08b..65c346d6d36 100644 --- a/storage/innobase/gis/gis0sea.cc +++ b/storage/innobase/gis/gis0sea.cc @@ -1255,8 +1255,8 @@ rtr_check_discard_page( mutex_exit(&index->rtr_track->rtr_active_mutex); lock_mutex_enter(); - lock_prdt_page_free_from_discard(block, lock_sys->prdt_hash); - lock_prdt_page_free_from_discard(block, lock_sys->prdt_page_hash); + lock_prdt_page_free_from_discard(block, lock_sys.prdt_hash); + lock_prdt_page_free_from_discard(block, lock_sys.prdt_page_hash); lock_mutex_exit(); } diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index 428c418629e..0bab513d051 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -1908,7 +1908,7 @@ struct dict_table_t { ulong n_waiting_or_granted_auto_inc_locks; /** The transaction that currently holds the the AUTOINC lock on this - table. Protected by lock_sys->mutex. */ + table. Protected by lock_sys.mutex. */ const trx_t* autoinc_trx; /* @} */ @@ -1923,7 +1923,7 @@ struct dict_table_t { /** Count of the number of record locks on this table. We use this to determine whether we can evict the table from the dictionary cache. - It is protected by lock_sys->mutex. */ + It is protected by lock_sys.mutex. */ ulint n_rec_locks; #ifndef DBUG_ASSERT_EXISTS @@ -1935,7 +1935,7 @@ private: ulint n_ref_count; public: - /** List of locks on the table. Protected by lock_sys->mutex. */ + /** List of locks on the table. Protected by lock_sys.mutex. */ table_lock_list_t locks; /** Timestamp of the last modification of this table. 
*/ diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h index 0e8c6a75f28..462d0cd4051 100644 --- a/storage/innobase/include/lock0lock.h +++ b/storage/innobase/include/lock0lock.h @@ -65,23 +65,6 @@ ulint lock_get_size(void); /*===============*/ /*********************************************************************//** -Creates the lock system at database start. */ -void -lock_sys_create( -/*============*/ - ulint n_cells); /*!< in: number of slots in lock hash table */ -/** Resize the lock hash table. -@param[in] n_cells number of slots in lock hash table */ -void -lock_sys_resize( - ulint n_cells); - -/*********************************************************************//** -Closes the lock system at database shutdown. */ -void -lock_sys_close(void); -/*================*/ -/*********************************************************************//** Gets the heap_no of the smallest user record on a page. @return heap_no of smallest user record, or PAGE_HEAP_NO_SUPREMUM */ UNIV_INLINE @@ -605,7 +588,7 @@ lock_print_info_all_transactions( Return approximate number or record locks (bits set in the bitmap) for this transaction. Since delete-marked records may be removed, the record count will not be precise. -The caller must be holding lock_sys->mutex. */ +The caller must be holding lock_sys.mutex. */ ulint lock_number_of_rows_locked( /*=======================*/ @@ -614,7 +597,7 @@ lock_number_of_rows_locked( /*********************************************************************//** Return the number of table locks for a transaction. -The caller must be holding lock_sys->mutex. */ +The caller must be holding lock_sys.mutex. */ ulint lock_number_of_tables_locked( /*=========================*/ @@ -897,11 +880,12 @@ struct lock_op_t{ typedef ib_mutex_t LockMutex; /** The lock system struct */ -struct lock_sys_t{ - char pad1[CACHE_LINE_SIZE]; /*!< padding to prevent other - memory update hotspots from - residing on the same memory - cache line */ +class lock_sys_t +{ + bool m_initialised; + +public: + MY_ALIGNED(CACHE_LINE_SIZE) LockMutex mutex; /*!< Mutex protecting the locks */ hash_table_t* rec_hash; /*!< hash table of the record @@ -911,13 +895,13 @@ struct lock_sys_t{ hash_table_t* prdt_page_hash; /*!< hash table of the page lock */ - char pad2[CACHE_LINE_SIZE]; /*!< Padding */ + MY_ALIGNED(CACHE_LINE_SIZE) LockMutex wait_mutex; /*!< Mutex protecting the next two fields */ srv_slot_t* waiting_threads; /*!< Array of user threads suspended while waiting for locks within InnoDB, protected - by the lock_sys->wait_mutex; + by the lock_sys.wait_mutex; os_event_set() and os_event_reset() on waiting_threads[]->event @@ -926,7 +910,7 @@ struct lock_sys_t{ srv_slot_t* last_slot; /*!< highest slot ever used in the waiting_threads array, protected by - lock_sys->wait_mutex */ + lock_sys.wait_mutex */ ulint n_lock_max_wait_time; /*!< Max wait time */ @@ -938,6 +922,38 @@ struct lock_sys_t{ bool timeout_thread_active; /*!< True if the timeout thread is running */ + + + /** + Constructor. + + Some members may require late initialisation, thus we just mark object as + uninitialised. Real initialisation happens in create(). + */ + lock_sys_t(): m_initialised(false) {} + + + bool is_initialised() { return m_initialised; } + + + /** + Creates the lock system at database start. + + @param[in] n_cells number of slots in lock hash table + */ + void create(ulint n_cells); + + + /** + Resize the lock hash table. 
+ + @param[in] n_cells number of slots in lock hash table + */ + void resize(ulint n_cells); + + + /** Closes the lock system at database shutdown. */ + void close(); }; /*************************************************************//** @@ -982,36 +998,36 @@ lock_rec_trx_wait( ulint type); /** The lock system */ -extern lock_sys_t* lock_sys; +extern lock_sys_t lock_sys; -/** Test if lock_sys->mutex can be acquired without waiting. */ +/** Test if lock_sys.mutex can be acquired without waiting. */ #define lock_mutex_enter_nowait() \ - (lock_sys->mutex.trylock(__FILE__, __LINE__)) + (lock_sys.mutex.trylock(__FILE__, __LINE__)) -/** Test if lock_sys->mutex is owned. */ -#define lock_mutex_own() (lock_sys->mutex.is_owned()) +/** Test if lock_sys.mutex is owned. */ +#define lock_mutex_own() (lock_sys.mutex.is_owned()) -/** Acquire the lock_sys->mutex. */ +/** Acquire the lock_sys.mutex. */ #define lock_mutex_enter() do { \ - mutex_enter(&lock_sys->mutex); \ + mutex_enter(&lock_sys.mutex); \ } while (0) -/** Release the lock_sys->mutex. */ +/** Release the lock_sys.mutex. */ #define lock_mutex_exit() do { \ - lock_sys->mutex.exit(); \ + lock_sys.mutex.exit(); \ } while (0) -/** Test if lock_sys->wait_mutex is owned. */ -#define lock_wait_mutex_own() (lock_sys->wait_mutex.is_owned()) +/** Test if lock_sys.wait_mutex is owned. */ +#define lock_wait_mutex_own() (lock_sys.wait_mutex.is_owned()) -/** Acquire the lock_sys->wait_mutex. */ +/** Acquire the lock_sys.wait_mutex. */ #define lock_wait_mutex_enter() do { \ - mutex_enter(&lock_sys->wait_mutex); \ + mutex_enter(&lock_sys.wait_mutex); \ } while (0) -/** Release the lock_sys->wait_mutex. */ +/** Release the lock_sys.wait_mutex. */ #define lock_wait_mutex_exit() do { \ - lock_sys->wait_mutex.exit(); \ + lock_sys.wait_mutex.exit(); \ } while (0) #ifdef WITH_WSREP diff --git a/storage/innobase/include/lock0lock.ic b/storage/innobase/include/lock0lock.ic index 76c491a8721..dad62c9685c 100644 --- a/storage/innobase/include/lock0lock.ic +++ b/storage/innobase/include/lock0lock.ic @@ -63,7 +63,7 @@ lock_rec_hash( ulint page_no)/*!< in: page number */ { return(unsigned(hash_calc_hash(lock_rec_fold(space, page_no), - lock_sys->rec_hash))); + lock_sys.rec_hash))); } /*********************************************************************//** @@ -99,11 +99,11 @@ lock_hash_get( ulint mode) /*!< in: lock mode */ { if (mode & LOCK_PREDICATE) { - return(lock_sys->prdt_hash); + return(lock_sys.prdt_hash); } else if (mode & LOCK_PRDT_PAGE) { - return(lock_sys->prdt_page_hash); + return(lock_sys.prdt_page_hash); } else { - return(lock_sys->rec_hash); + return(lock_sys.rec_hash); } } diff --git a/storage/innobase/include/lock0priv.h b/storage/innobase/include/lock0priv.h index d5e31057aac..ec596f6ca5b 100644 --- a/storage/innobase/include/lock0priv.h +++ b/storage/innobase/include/lock0priv.h @@ -111,7 +111,7 @@ operator<<(std::ostream& out, const lock_rec_t& lock) return(lock.print(out)); } -/** Lock struct; protected by lock_sys->mutex */ +/** Lock struct; protected by lock_sys.mutex */ struct lock_t { trx_t* trx; /*!< transaction owning the lock */ diff --git a/storage/innobase/include/lock0types.h b/storage/innobase/include/lock0types.h index d08eaabfb1e..792a5f21acb 100644 --- a/storage/innobase/include/lock0types.h +++ b/storage/innobase/include/lock0types.h @@ -31,7 +31,6 @@ Created 5/7/1996 Heikki Tuuri #define lock_t ib_lock_t struct lock_t; -struct lock_sys_t; struct lock_table_t; /* Basic lock modes */ diff --git a/storage/innobase/include/trx0sys.h 
b/storage/innobase/include/trx0sys.h index 1013df88e49..00f245a05c0 100644 --- a/storage/innobase/include/trx0sys.h +++ b/storage/innobase/include/trx0sys.h @@ -590,10 +590,10 @@ public: the transaction may get committed before this method returns. With do_ref_count == false the caller may dereference returned trx pointer - only if lock_sys->mutex was acquired before calling find(). + only if lock_sys.mutex was acquired before calling find(). With do_ref_count == true caller may dereference trx even if it is not - holding lock_sys->mutex. Caller is responsible for calling + holding lock_sys.mutex. Caller is responsible for calling trx->release_reference() when it is done playing with trx. Ideally this method should get caller rw_trx_hash_pins along with trx diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index 012f34099e8..685208853ee 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -266,7 +266,7 @@ This function is used to find one X/Open XA distributed transaction which is in the prepared state @return trx or NULL; on match, the trx->xid will be invalidated; note that the trx may have been committed, unless the caller is -holding lock_sys->mutex */ +holding lock_sys.mutex */ trx_t * trx_get_trx_by_xid( /*===============*/ @@ -327,7 +327,7 @@ trx_print_low( /**********************************************************************//** Prints info about a transaction. -The caller must hold lock_sys->mutex and trx_sys.mutex. +The caller must hold lock_sys.mutex and trx_sys.mutex. When possible, use trx_print() instead. */ void trx_print_latched( @@ -339,7 +339,7 @@ trx_print_latched( /**********************************************************************//** Prints info about a transaction. -Acquires and releases lock_sys->mutex. */ +Acquires and releases lock_sys.mutex. */ void trx_print( /*======*/ @@ -612,7 +612,7 @@ To query the state either of the mutexes is sufficient within the locking code and no mutex is required when the query thread is no longer waiting. */ /** The locks and state of an active transaction. Protected by -lock_sys->mutex, trx->mutex or both. */ +lock_sys.mutex, trx->mutex or both. */ struct trx_lock_t { ulint n_active_thrs; /*!< number of active query threads */ @@ -624,10 +624,10 @@ struct trx_lock_t { TRX_QUE_LOCK_WAIT, this points to the lock request, otherwise this is NULL; set to non-NULL when holding - both trx->mutex and lock_sys->mutex; + both trx->mutex and lock_sys.mutex; set to NULL when holding - lock_sys->mutex; readers should - hold lock_sys->mutex, except when + lock_sys.mutex; readers should + hold lock_sys.mutex, except when they are holding trx->mutex and wait_lock==NULL */ ib_uint64_t deadlock_mark; /*!< A mark field that is initialized @@ -641,13 +641,13 @@ struct trx_lock_t { resolution, it sets this to true. Protected by trx->mutex. */ time_t wait_started; /*!< lock wait started at this time, - protected only by lock_sys->mutex */ + protected only by lock_sys.mutex */ que_thr_t* wait_thr; /*!< query thread belonging to this trx that is in QUE_THR_LOCK_WAIT state. For threads suspended in a lock wait, this is protected by - lock_sys->mutex. Otherwise, this may + lock_sys.mutex. Otherwise, this may only be modified by the thread that is serving the running transaction. 
*/ @@ -660,12 +660,12 @@ struct trx_lock_t { ulint table_cached; /*!< Next free table lock in pool */ mem_heap_t* lock_heap; /*!< memory heap for trx_locks; - protected by lock_sys->mutex */ + protected by lock_sys.mutex */ trx_lock_list_t trx_locks; /*!< locks requested by the transaction; insertions are protected by trx->mutex - and lock_sys->mutex; removals are - protected by lock_sys->mutex */ + and lock_sys.mutex; removals are + protected by lock_sys.mutex */ lock_pool_t table_locks; /*!< All table locks requested by this transaction, including AUTOINC locks */ @@ -788,7 +788,7 @@ transactions. The trx_sys.mutex prevents a race condition between it and lock_trx_release_locks() [invoked by trx_commit()]. * trx_print_low() may access transactions not associated with the current -thread. The caller must be holding lock_sys->mutex. +thread. The caller must be holding lock_sys.mutex. * When a transaction handle is in the trx_sys.mysql_trx_list or trx_sys.trx_list, some of its fields must not be modified without @@ -797,7 +797,7 @@ holding trx_sys.mutex exclusively. * The locking code (in particular, lock_deadlock_recursive() and lock_rec_convert_impl_to_expl()) will access transactions associated to other connections. The locks of transactions are protected by -lock_sys->mutex and sometimes by trx->mutex. */ +lock_sys.mutex and sometimes by trx->mutex. */ typedef enum { TRX_SERVER_ABORT = 0, @@ -870,7 +870,7 @@ public: TrxMutex mutex; /*!< Mutex protecting the fields state and lock (except some fields of lock, which are protected by - lock_sys->mutex) */ + lock_sys.mutex) */ /* Note: in_depth was split from in_innodb for fixing a RO performance issue. Acquiring the trx_t::mutex for each row @@ -961,7 +961,7 @@ public: ACTIVE->COMMITTED is possible when the transaction is in rw_trx_hash. - Transitions to COMMITTED are protected by both lock_sys->mutex + Transitions to COMMITTED are protected by both lock_sys.mutex and trx->mutex. NOTE: Some of these state change constraints are an overkill, @@ -974,7 +974,7 @@ public: transaction, or NULL if not yet set */ trx_lock_t lock; /*!< Information about the transaction locks and state. Protected by - trx->mutex or lock_sys->mutex + trx->mutex or lock_sys.mutex or both */ bool is_recovered; /*!< 0=normal transaction, 1=recovered, must be rolled back, @@ -1156,7 +1156,7 @@ public: also in the lock list trx_locks. This vector needs to be freed explicitly when the trx instance is destroyed. - Protected by lock_sys->mutex. */ + Protected by lock_sys.mutex. */ /*------------------------------*/ bool read_only; /*!< true if transaction is flagged as a READ-ONLY transaction. diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index b9a70e29604..32648e2bdbf 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -81,7 +81,7 @@ lock_rec_has_to_wait_in_queue( /*************************************************************//** Grants a lock to a waiting lock request and releases the waiting transaction. -The caller must hold lock_sys->mutex. */ +The caller must hold lock_sys.mutex. */ static void lock_grant( @@ -279,7 +279,7 @@ private: ulint m_heap_no; /*!< heap number if rec lock */ }; - /** Used in deadlock tracking. Protected by lock_sys->mutex. */ + /** Used in deadlock tracking. Protected by lock_sys.mutex. */ static ib_uint64_t s_lock_mark_counter; /** Calculation steps thus far. It is the count of the nodes visited. 
*/ @@ -335,7 +335,7 @@ lock_rec_validate_page( #endif /* UNIV_DEBUG */ /* The lock system */ -lock_sys_t* lock_sys = NULL; +lock_sys_t lock_sys; /** We store info on the latest deadlock error to this buffer. InnoDB Monitor will then fetch it and print */ @@ -471,34 +471,31 @@ lock_sec_rec_cons_read_sees( return(view->sees(max_trx_id)); } -/*********************************************************************//** -Creates the lock system at database start. */ -void -lock_sys_create( -/*============*/ - ulint n_cells) /*!< in: number of slots in lock hash table */ -{ - ulint lock_sys_sz; - - lock_sys_sz = sizeof(*lock_sys) + OS_THREAD_MAX_N * sizeof(srv_slot_t); - lock_sys = static_cast<lock_sys_t*>(ut_zalloc_nokey(lock_sys_sz)); +/** + Creates the lock system at database start. - void* ptr = &lock_sys[1]; + @param[in] n_cells number of slots in lock hash table +*/ +void lock_sys_t::create(ulint n_cells) +{ + ut_ad(this == &lock_sys); - lock_sys->waiting_threads = static_cast<srv_slot_t*>(ptr); + m_initialised= true; - lock_sys->last_slot = lock_sys->waiting_threads; + waiting_threads = static_cast<srv_slot_t*> + (ut_zalloc_nokey(srv_max_n_threads * sizeof *waiting_threads)); + last_slot = waiting_threads; - mutex_create(LATCH_ID_LOCK_SYS, &lock_sys->mutex); + mutex_create(LATCH_ID_LOCK_SYS, &mutex); - mutex_create(LATCH_ID_LOCK_SYS_WAIT, &lock_sys->wait_mutex); + mutex_create(LATCH_ID_LOCK_SYS_WAIT, &wait_mutex); - lock_sys->timeout_event = os_event_create(0); + timeout_event = os_event_create(0); - lock_sys->rec_hash = hash_create(n_cells); - lock_sys->prdt_hash = hash_create(n_cells); - lock_sys->prdt_page_hash = hash_create(n_cells); + rec_hash = hash_create(n_cells); + prdt_hash = hash_create(n_cells); + prdt_page_hash = hash_create(n_cells); if (!srv_read_only_mode) { lock_latest_err_file = os_file_create_tmpfile(NULL); @@ -518,31 +515,33 @@ lock_rec_lock_fold( lock->un_member.rec_lock.page_no)); } -/** Resize the lock hash tables. -@param[in] n_cells number of slots in lock hash table */ -void -lock_sys_resize( - ulint n_cells) + +/** + Resize the lock hash table. + + @param[in] n_cells number of slots in lock hash table +*/ +void lock_sys_t::resize(ulint n_cells) { - hash_table_t* old_hash; + ut_ad(this == &lock_sys); - lock_mutex_enter(); + mutex_enter(&mutex); - old_hash = lock_sys->rec_hash; - lock_sys->rec_hash = hash_create(n_cells); - HASH_MIGRATE(old_hash, lock_sys->rec_hash, lock_t, hash, + hash_table_t* old_hash = rec_hash; + rec_hash = hash_create(n_cells); + HASH_MIGRATE(old_hash, rec_hash, lock_t, hash, lock_rec_lock_fold); hash_table_free(old_hash); - old_hash = lock_sys->prdt_hash; - lock_sys->prdt_hash = hash_create(n_cells); - HASH_MIGRATE(old_hash, lock_sys->prdt_hash, lock_t, hash, + old_hash = prdt_hash; + prdt_hash = hash_create(n_cells); + HASH_MIGRATE(old_hash, prdt_hash, lock_t, hash, lock_rec_lock_fold); hash_table_free(old_hash); - old_hash = lock_sys->prdt_page_hash; - lock_sys->prdt_page_hash = hash_create(n_cells); - HASH_MIGRATE(old_hash, lock_sys->prdt_page_hash, lock_t, hash, + old_hash = prdt_page_hash; + prdt_page_hash = hash_create(n_cells); + HASH_MIGRATE(old_hash, prdt_page_hash, lock_t, hash, lock_rec_lock_fold); hash_table_free(old_hash); @@ -571,40 +570,39 @@ lock_sys_resize( buf_pool_mutex_exit(buf_pool); } - lock_mutex_exit(); + mutex_exit(&mutex); } -/*********************************************************************//** -Closes the lock system at database shutdown. 
*/ -void -lock_sys_close(void) -/*================*/ + +/** Closes the lock system at database shutdown. */ +void lock_sys_t::close() { + ut_ad(this == &lock_sys); + + if (!m_initialised) return; + if (lock_latest_err_file != NULL) { fclose(lock_latest_err_file); lock_latest_err_file = NULL; } - hash_table_free(lock_sys->rec_hash); - hash_table_free(lock_sys->prdt_hash); - hash_table_free(lock_sys->prdt_page_hash); + hash_table_free(rec_hash); + hash_table_free(prdt_hash); + hash_table_free(prdt_page_hash); - os_event_destroy(lock_sys->timeout_event); + os_event_destroy(timeout_event); - mutex_destroy(&lock_sys->mutex); - mutex_destroy(&lock_sys->wait_mutex); + mutex_destroy(&mutex); + mutex_destroy(&wait_mutex); - srv_slot_t* slot = lock_sys->waiting_threads; - - for (ulint i = 0; i < OS_THREAD_MAX_N; i++, ++slot) { - if (slot->event != NULL) { - os_event_destroy(slot->event); + for (ulint i = srv_max_n_threads; i--; ) { + if (os_event_t& event = waiting_threads[i].event) { + os_event_destroy(event); } } - ut_free(lock_sys); - - lock_sys = NULL; + ut_free(waiting_threads); + m_initialised= false; } /*********************************************************************//** @@ -1029,7 +1027,7 @@ lock_rec_expl_exist_on_page( lock_mutex_enter(); /* Only used in ibuf pages, so rec_hash is good enough */ - lock = lock_rec_get_first_on_page_addr(lock_sys->rec_hash, + lock = lock_rec_get_first_on_page_addr(lock_sys.rec_hash, space, page_no); lock_mutex_exit(); @@ -1147,7 +1145,7 @@ lock_rec_has_expl( || (precise_mode & LOCK_MODE_MASK) == LOCK_X); ut_ad(!(precise_mode & LOCK_INSERT_INTENTION)); - for (lock = lock_rec_get_first(lock_sys->rec_hash, block, heap_no); + for (lock = lock_rec_get_first(lock_sys.rec_hash, block, heap_no); lock != NULL; lock = lock_rec_get_next(heap_no, lock)) { @@ -1200,7 +1198,7 @@ lock_rec_other_has_expl_req( return(NULL); } - for (lock_t* lock = lock_rec_get_first(lock_sys->rec_hash, + for (lock_t* lock = lock_rec_get_first(lock_sys.rec_hash, block, heap_no); lock != NULL; lock = lock_rec_get_next(heap_no, lock)) { @@ -1310,7 +1308,7 @@ lock_rec_other_has_conflicting( bool is_supremum = (heap_no == PAGE_HEAP_NO_SUPREMUM); - for (lock = lock_rec_get_first(lock_sys->rec_hash, block, heap_no); + for (lock = lock_rec_get_first(lock_sys.rec_hash, block, heap_no); lock != NULL; lock = lock_rec_get_next(heap_no, lock)) { @@ -1390,7 +1388,7 @@ lock_sec_rec_some_has_impl( Return approximate number or record locks (bits set in the bitmap) for this transaction. Since delete-marked records may be removed, the record count will not be precise. -The caller must be holding lock_sys->mutex. */ +The caller must be holding lock_sys.mutex. */ ulint lock_number_of_rows_locked( /*=======================*/ @@ -1403,7 +1401,7 @@ lock_number_of_rows_locked( /*********************************************************************//** Return the number of table locks for a transaction. -The caller must be holding lock_sys->mutex. */ +The caller must be holding lock_sys.mutex. 
*/ ulint lock_number_of_tables_locked( /*=========================*/ @@ -2290,7 +2288,7 @@ lock_rec_lock( ut_ad((LOCK_MODE_MASK & mode) != LOCK_X || lock_table_has(trx, index->table, LOCK_IX)); - if (lock_t *lock= lock_rec_get_first_on_page(lock_sys->rec_hash, block)) + if (lock_t *lock= lock_rec_get_first_on_page(lock_sys.rec_hash, block)) { trx_mutex_enter(trx); if (lock_rec_get_next_on_page(lock) || @@ -2415,7 +2413,7 @@ lock_rec_has_to_wait_in_queue( /*************************************************************//** Grants a lock to a waiting lock request and releases the waiting transaction. -The caller must hold lock_sys->mutex but not lock->trx->mutex. */ +The caller must hold lock_sys.mutex but not lock->trx->mutex. */ static void lock_grant( @@ -2965,11 +2963,11 @@ lock_rec_free_all_from_discard_page( page_no = block->page.id.page_no(); lock_rec_free_all_from_discard_page_low( - space, page_no, lock_sys->rec_hash); + space, page_no, lock_sys.rec_hash); lock_rec_free_all_from_discard_page_low( - space, page_no, lock_sys->prdt_hash); + space, page_no, lock_sys.prdt_hash); lock_rec_free_all_from_discard_page_low( - space, page_no, lock_sys->prdt_page_hash); + space, page_no, lock_sys.prdt_page_hash); } /*============= RECORD LOCK MOVING AND INHERITING ===================*/ @@ -3014,12 +3012,12 @@ lock_rec_reset_and_release_wait( ulint heap_no)/*!< in: heap number of record */ { lock_rec_reset_and_release_wait_low( - lock_sys->rec_hash, block, heap_no); + lock_sys.rec_hash, block, heap_no); lock_rec_reset_and_release_wait_low( - lock_sys->prdt_hash, block, PAGE_HEAP_NO_INFIMUM); + lock_sys.prdt_hash, block, PAGE_HEAP_NO_INFIMUM); lock_rec_reset_and_release_wait_low( - lock_sys->prdt_page_hash, block, PAGE_HEAP_NO_INFIMUM); + lock_sys.prdt_page_hash, block, PAGE_HEAP_NO_INFIMUM); } /*************************************************************//** @@ -3052,7 +3050,7 @@ lock_rec_inherit_to_gap( DO want S-locks/X-locks(taken for replace) set by a consistency constraint to be inherited also then. 
*/ - for (lock = lock_rec_get_first(lock_sys->rec_hash, block, heap_no); + for (lock = lock_rec_get_first(lock_sys.rec_hash, block, heap_no); lock != NULL; lock = lock_rec_get_next(heap_no, lock)) { @@ -3090,7 +3088,7 @@ lock_rec_inherit_to_gap_if_gap_lock( lock_mutex_enter(); - for (lock = lock_rec_get_first(lock_sys->rec_hash, block, heap_no); + for (lock = lock_rec_get_first(lock_sys.rec_hash, block, heap_no); lock != NULL; lock = lock_rec_get_next(heap_no, lock)) { @@ -3134,8 +3132,8 @@ lock_rec_move_low( /* If the lock is predicate lock, it resides on INFIMUM record */ ut_ad(lock_rec_get_first( lock_hash, receiver, receiver_heap_no) == NULL - || lock_hash == lock_sys->prdt_hash - || lock_hash == lock_sys->prdt_page_hash); + || lock_hash == lock_sys.prdt_hash + || lock_hash == lock_sys.prdt_page_hash); for (lock = lock_rec_get_first(lock_hash, donator, donator_heap_no); @@ -3158,7 +3156,7 @@ lock_rec_move_low( lock->index, lock->trx, FALSE); } - ut_ad(lock_rec_get_first(lock_sys->rec_hash, + ut_ad(lock_rec_get_first(lock_sys.rec_hash, donator, donator_heap_no) == NULL); } @@ -3213,7 +3211,7 @@ lock_rec_move( ulint donator_heap_no)/*!< in: heap_no of the record which gives the locks */ { - lock_rec_move_low(lock_sys->rec_hash, receiver, donator, + lock_rec_move_low(lock_sys.rec_hash, receiver, donator, receiver_heap_no, donator_heap_no); } @@ -3238,7 +3236,7 @@ lock_move_reorganize_page( lock_mutex_enter(); /* FIXME: This needs to deal with predicate lock too */ - lock = lock_rec_get_first_on_page(lock_sys->rec_hash, block); + lock = lock_rec_get_first_on_page(lock_sys.rec_hash, block); if (lock == NULL) { lock_mutex_exit(); @@ -3371,7 +3369,7 @@ lock_move_rec_list_end( table to the end of the hash chain, and lock_rec_add_to_queue does not reuse locks if there are waiters in the queue. 
*/ - for (lock = lock_rec_get_first_on_page(lock_sys->rec_hash, block); lock; + for (lock = lock_rec_get_first_on_page(lock_sys.rec_hash, block); lock; lock = lock_rec_get_next_on_page(lock)) { const rec_t* rec1 = rec; const rec_t* rec2; @@ -3486,7 +3484,7 @@ lock_move_rec_list_start( lock_mutex_enter(); - for (lock = lock_rec_get_first_on_page(lock_sys->rec_hash, block); lock; + for (lock = lock_rec_get_first_on_page(lock_sys.rec_hash, block); lock; lock = lock_rec_get_next_on_page(lock)) { const rec_t* rec1; const rec_t* rec2; @@ -3598,7 +3596,7 @@ lock_rtr_move_rec_list( lock_mutex_enter(); - for (lock = lock_rec_get_first_on_page(lock_sys->rec_hash, block); lock; + for (lock = lock_rec_get_first_on_page(lock_sys.rec_hash, block); lock; lock = lock_rec_get_next_on_page(lock)) { ulint moved = 0; const rec_t* rec1; @@ -3710,7 +3708,7 @@ lock_update_merge_right( waiting transactions */ lock_rec_reset_and_release_wait_low( - lock_sys->rec_hash, left_block, PAGE_HEAP_NO_SUPREMUM); + lock_sys.rec_hash, left_block, PAGE_HEAP_NO_SUPREMUM); #ifdef UNIV_DEBUG /* there should exist no page lock on the left page, @@ -3718,7 +3716,7 @@ lock_update_merge_right( ulint space = left_block->page.id.space(); ulint page_no = left_block->page.id.page_no(); ut_ad(lock_rec_get_first_on_page_addr( - lock_sys->prdt_page_hash, space, page_no) == NULL); + lock_sys.prdt_page_hash, space, page_no) == NULL); #endif /* UNIV_DEBUG */ lock_rec_free_all_from_discard_page(left_block); @@ -3828,7 +3826,7 @@ lock_update_merge_left( releasing waiting transactions */ lock_rec_reset_and_release_wait_low( - lock_sys->rec_hash, left_block, PAGE_HEAP_NO_SUPREMUM); + lock_sys.rec_hash, left_block, PAGE_HEAP_NO_SUPREMUM); } /* Move the locks from the supremum of right page to the supremum @@ -3843,7 +3841,7 @@ lock_update_merge_left( ulint space = right_block->page.id.space(); ulint page_no = right_block->page.id.page_no(); lock_t* lock_test = lock_rec_get_first_on_page_addr( - lock_sys->prdt_page_hash, space, page_no); + lock_sys.prdt_page_hash, space, page_no); ut_ad(!lock_test); #endif /* UNIV_DEBUG */ @@ -3896,8 +3894,8 @@ lock_update_discard( lock_mutex_enter(); - if (!lock_rec_get_first_on_page(lock_sys->rec_hash, block) - && (!lock_rec_get_first_on_page(lock_sys->prdt_hash, block))) { + if (!lock_rec_get_first_on_page(lock_sys.rec_hash, block) + && (!lock_rec_get_first_on_page(lock_sys.prdt_hash, block))) { /* No locks exist on page, nothing to do */ lock_mutex_exit(); @@ -4828,7 +4826,7 @@ lock_rec_unlock( lock_mutex_enter(); trx_mutex_enter(trx); - first_lock = lock_rec_get_first(lock_sys->rec_hash, block, heap_no); + first_lock = lock_rec_get_first(lock_sys.rec_hash, block, heap_no); /* Find the last lock with the same lock_mode and transaction on the record. */ @@ -4877,7 +4875,7 @@ released: } } } else { - lock_grant_and_move_on_rec(lock_sys->rec_hash, first_lock, heap_no); + lock_grant_and_move_on_rec(lock_sys.rec_hash, first_lock, heap_no); } lock_mutex_exit(); @@ -5200,11 +5198,11 @@ lock_get_n_rec_locks(void) ut_ad(lock_mutex_own()); - for (i = 0; i < hash_get_n_cells(lock_sys->rec_hash); i++) { + for (i = 0; i < hash_get_n_cells(lock_sys.rec_hash); i++) { const lock_t* lock; for (lock = static_cast<const lock_t*>( - HASH_GET_FIRST(lock_sys->rec_hash, i)); + HASH_GET_FIRST(lock_sys.rec_hash, i)); lock != 0; lock = static_cast<const lock_t*>( HASH_GET_NEXT(hash, lock))) { @@ -5520,7 +5518,7 @@ lock_table_queue_validate( /* lock->trx->state cannot change from or to NOT_STARTED while we are holding the trx_sys.mutex. 
It may change from ACTIVE to PREPARED, but it may not change to - COMMITTED, because we are holding the lock_sys->mutex. */ + COMMITTED, because we are holding the lock_sys.mutex. */ ut_ad(trx_assert_started(lock->trx)); if (!lock_get_wait(lock)) { @@ -5576,7 +5574,7 @@ lock_rec_queue_validate( if (!page_rec_is_user_rec(rec)) { - for (lock = lock_rec_get_first(lock_sys->rec_hash, + for (lock = lock_rec_get_first(lock_sys.rec_hash, block, heap_no); lock != NULL; lock = lock_rec_get_next_const(heap_no, lock)) { @@ -5608,7 +5606,7 @@ lock_rec_queue_validate( ut_ad(lock_mutex_own()); /* impl_trx cannot be committed until lock_mutex_exit() - because lock_trx_release_locks() acquires lock_sys->mutex */ + because lock_trx_release_locks() acquires lock_sys.mutex */ if (impl_trx != NULL) { const lock_t* other_lock @@ -5658,7 +5656,7 @@ lock_rec_queue_validate( } } - for (lock = lock_rec_get_first(lock_sys->rec_hash, block, heap_no); + for (lock = lock_rec_get_first(lock_sys.rec_hash, block, heap_no); lock != NULL; lock = lock_rec_get_next_const(heap_no, lock)) { @@ -5734,7 +5732,7 @@ lock_rec_validate_page( mutex_enter(&trx_sys.mutex); loop: lock = lock_rec_get_first_on_page_addr( - lock_sys->rec_hash, + lock_sys.rec_hash, block->page.id.space(), block->page.id.page_no()); if (!lock) { @@ -5804,7 +5802,7 @@ static MY_ATTRIBUTE((warn_unused_result)) const lock_t* lock_rec_validate( /*==============*/ - ulint start, /*!< in: lock_sys->rec_hash + ulint start, /*!< in: lock_sys.rec_hash bucket */ ib_uint64_t* limit) /*!< in/out: upper limit of (space, page_no) */ @@ -5813,7 +5811,7 @@ lock_rec_validate( ut_ad(mutex_own(&trx_sys.mutex)); for (const lock_t* lock = static_cast<const lock_t*>( - HASH_GET_FIRST(lock_sys->rec_hash, start)); + HASH_GET_FIRST(lock_sys.rec_hash, start)); lock != NULL; lock = static_cast<const lock_t*>(HASH_GET_NEXT(hash, lock))) { @@ -5937,7 +5935,7 @@ lock_validate() don't want to hog the lock_sys_t::mutex and the trx_sys_t::mutex. Release both mutexes during the validation check. */ - for (ulint i = 0; i < hash_get_n_cells(lock_sys->rec_hash); i++) { + for (ulint i = 0; i < hash_get_n_cells(lock_sys.rec_hash); i++) { ib_uint64_t limit = 0; while (const lock_t* lock = lock_rec_validate(i, &limit)) { @@ -6018,7 +6016,7 @@ lock_rec_insert_check_and_lock( BTR_NO_LOCKING_FLAG and skip the locking altogether. */ ut_ad(lock_table_has(trx, index->table, LOCK_IX)); - lock = lock_rec_get_first(lock_sys->rec_hash, block, heap_no); + lock = lock_rec_get_first(lock_sys.rec_hash, block, heap_no); if (lock == NULL) { /* We optimize CPU time usage in the simplest case */ @@ -6953,11 +6951,11 @@ lock_trx_release_locks( bool release_lock = UT_LIST_GET_LEN(trx->lock.trx_locks) > 0; - /* Don't take lock_sys mutex if trx didn't acquire any lock. */ + /* Don't take lock_sys.mutex if trx didn't acquire any lock. */ if (release_lock) { /* The transition of trx->state to TRX_STATE_COMMITTED_IN_MEMORY - is protected by both the lock_sys->mutex and the trx->mutex. */ + is protected by both the lock_sys.mutex and the trx->mutex. */ lock_mutex_enter(); } @@ -7210,7 +7208,7 @@ void lock_set_timeout_event() /*====================*/ { - os_event_set(lock_sys->timeout_event); + os_event_set(lock_sys.timeout_event); } #ifdef UNIV_DEBUG @@ -7434,8 +7432,8 @@ DeadlockChecker::get_first_lock(ulint* heap_no) const hash_table_t* lock_hash; lock_hash = lock->type_mode & LOCK_PREDICATE - ? lock_sys->prdt_hash - : lock_sys->rec_hash; + ? 
lock_sys.prdt_hash + : lock_sys.rec_hash; /* We are only interested in records that match the heap_no. */ *heap_no = lock_rec_find_set_bit(lock); diff --git a/storage/innobase/lock/lock0prdt.cc b/storage/innobase/lock/lock0prdt.cc index 0e79dd5b460..6f677347eeb 100644 --- a/storage/innobase/lock/lock0prdt.cc +++ b/storage/innobase/lock/lock0prdt.cc @@ -539,7 +539,7 @@ lock_prdt_insert_check_and_lock( lock_t* lock; /* Only need to check locks on prdt_hash */ - lock = lock_rec_get_first(lock_sys->prdt_hash, block, PRDT_HEAPNO); + lock = lock_rec_get_first(lock_sys.prdt_hash, block, PRDT_HEAPNO); if (lock == NULL) { lock_mutex_exit(); @@ -626,7 +626,7 @@ lock_prdt_update_parent( /* Get all locks in parent */ for (lock = lock_rec_get_first_on_page_addr( - lock_sys->prdt_hash, space, page_no); + lock_sys.prdt_hash, space, page_no); lock; lock = lock_rec_get_next_on_page(lock)) { lock_prdt_t* lock_prdt; @@ -816,8 +816,8 @@ lock_prdt_lock( ut_ad(type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE)); hash_table_t* hash = type_mode == LOCK_PREDICATE - ? lock_sys->prdt_hash - : lock_sys->prdt_page_hash; + ? lock_sys.prdt_hash + : lock_sys.prdt_page_hash; /* Another transaction cannot have an implicit lock on the record, because when we come here, we already have modified the clustered @@ -923,7 +923,7 @@ lock_place_prdt_page_lock( lock_mutex_enter(); const lock_t* lock = lock_rec_get_first_on_page_addr( - lock_sys->prdt_page_hash, space, page_no); + lock_sys.prdt_page_hash, space, page_no); const ulint mode = LOCK_S | LOCK_PRDT_PAGE; trx_t* trx = thr_get_trx(thr); @@ -977,7 +977,7 @@ lock_test_prdt_page_lock( lock_mutex_enter(); lock = lock_rec_get_first_on_page_addr( - lock_sys->prdt_page_hash, space, page_no); + lock_sys.prdt_page_hash, space, page_no); lock_mutex_exit(); @@ -997,13 +997,13 @@ lock_prdt_rec_move( { lock_t* lock; - if (!lock_sys->prdt_hash) { + if (!lock_sys.prdt_hash) { return; } lock_mutex_enter(); - for (lock = lock_rec_get_first(lock_sys->prdt_hash, + for (lock = lock_rec_get_first(lock_sys.prdt_hash, donator, PRDT_HEAPNO); lock != NULL; lock = lock_rec_get_next(PRDT_HEAPNO, lock)) { diff --git a/storage/innobase/lock/lock0wait.cc b/storage/innobase/lock/lock0wait.cc index fb291b2dd63..b2ed634e2a4 100644 --- a/storage/innobase/lock/lock0wait.cc +++ b/storage/innobase/lock/lock0wait.cc @@ -46,7 +46,7 @@ lock_wait_table_print(void) { ut_ad(lock_wait_mutex_own()); - const srv_slot_t* slot = lock_sys->waiting_threads; + const srv_slot_t* slot = lock_sys.waiting_threads; for (ulint i = 0; i < OS_THREAD_MAX_N; i++, ++slot) { @@ -72,7 +72,7 @@ lock_wait_table_release_slot( srv_slot_t* slot) /*!< in: slot to release */ { #ifdef UNIV_DEBUG - srv_slot_t* upper = lock_sys->waiting_threads + OS_THREAD_MAX_N; + srv_slot_t* upper = lock_sys.waiting_threads + OS_THREAD_MAX_N; #endif /* UNIV_DEBUG */ lock_wait_mutex_enter(); @@ -83,7 +83,7 @@ lock_wait_table_release_slot( ut_ad(slot->thr->slot == slot); /* Must be within the array boundaries. */ - ut_ad(slot >= lock_sys->waiting_threads); + ut_ad(slot >= lock_sys.waiting_threads); ut_ad(slot < upper); /* Note: When we reserve the slot we use the trx_t::mutex to update @@ -102,23 +102,23 @@ lock_wait_table_release_slot( lock_mutex_exit(); /* Scan backwards and adjust the last free slot pointer. 
*/ - for (slot = lock_sys->last_slot; - slot > lock_sys->waiting_threads && !slot->in_use; + for (slot = lock_sys.last_slot; + slot > lock_sys.waiting_threads && !slot->in_use; --slot) { /* No op */ } /* Either the array is empty or the last scanned slot is in use. */ - ut_ad(slot->in_use || slot == lock_sys->waiting_threads); + ut_ad(slot->in_use || slot == lock_sys.waiting_threads); - lock_sys->last_slot = slot + 1; + lock_sys.last_slot = slot + 1; /* The last slot is either outside of the array boundary or it's on an empty slot. */ - ut_ad(lock_sys->last_slot == upper || !lock_sys->last_slot->in_use); + ut_ad(lock_sys.last_slot == upper || !lock_sys.last_slot->in_use); - ut_ad(lock_sys->last_slot >= lock_sys->waiting_threads); - ut_ad(lock_sys->last_slot <= upper); + ut_ad(lock_sys.last_slot >= lock_sys.waiting_threads); + ut_ad(lock_sys.last_slot <= upper); lock_wait_mutex_exit(); } @@ -140,7 +140,7 @@ lock_wait_table_reserve_slot( ut_ad(lock_wait_mutex_own()); ut_ad(trx_mutex_own(thr_get_trx(thr))); - slot = lock_sys->waiting_threads; + slot = lock_sys.waiting_threads; for (i = OS_THREAD_MAX_N; i--; ++slot) { if (!slot->in_use) { @@ -158,12 +158,12 @@ lock_wait_table_reserve_slot( slot->suspend_time = ut_time(); slot->wait_timeout = wait_timeout; - if (slot == lock_sys->last_slot) { - ++lock_sys->last_slot; + if (slot == lock_sys.last_slot) { + ++lock_sys.last_slot; } - ut_ad(lock_sys->last_slot - <= lock_sys->waiting_threads + OS_THREAD_MAX_N); + ut_ad(lock_sys.last_slot + <= lock_sys.waiting_threads + OS_THREAD_MAX_N); return(slot); } @@ -184,7 +184,7 @@ lock_wait_table_reserve_slot( check if lock timeout was for priority thread, as a side effect trigger lock monitor @param[in] trx transaction owning the lock -@param[in] locked true if trx and lock_sys_mutex is ownd +@param[in] locked true if trx and lock_sys.mutex is ownd @return false for regular lock timeout */ static bool @@ -394,11 +394,11 @@ lock_wait_suspend_thread( /* Only update the variable if we successfully retrieved the start and finish times. See Bug#36819. */ - if (diff_time > lock_sys->n_lock_max_wait_time + if (diff_time > lock_sys.n_lock_max_wait_time && start_time != -1 && finish_time != -1) { - lock_sys->n_lock_max_wait_time = diff_time; + lock_sys.n_lock_max_wait_time = diff_time; } /* Record the lock wait time for this thread */ @@ -530,7 +530,7 @@ os_thread_ret_t DECLARE_THREAD(lock_wait_timeout_thread)(void*) { int64_t sig_count = 0; - os_event_t event = lock_sys->timeout_event; + os_event_t event = lock_sys.timeout_event; ut_ad(!srv_read_only_mode); @@ -556,8 +556,8 @@ DECLARE_THREAD(lock_wait_timeout_thread)(void*) /* Check all slots for user threads that are waiting on locks, and if they have exceeded the time limit. */ - for (slot = lock_sys->waiting_threads; - slot < lock_sys->last_slot; + for (slot = lock_sys.waiting_threads; + slot < lock_sys.last_slot; ++slot) { /* We are doing a read without the lock mutex @@ -576,7 +576,7 @@ DECLARE_THREAD(lock_wait_timeout_thread)(void*) } while (srv_shutdown_state < SRV_SHUTDOWN_CLEANUP); - lock_sys->timeout_thread_active = false; + lock_sys.timeout_thread_active = false; /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. 
*/ diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc index 66b25305566..28717a22982 100644 --- a/storage/innobase/log/log0log.cc +++ b/storage/innobase/log/log0log.cc @@ -1881,7 +1881,7 @@ logs_empty_and_mark_files_at_shutdown(void) srv_shutdown_state = SRV_SHUTDOWN_CLEANUP; loop: - ut_ad(lock_sys || !srv_was_started); + ut_ad(lock_sys.is_initialised() || !srv_was_started); ut_ad(log_sys || !srv_was_started); ut_ad(fil_system || !srv_was_started); os_event_set(srv_buf_resize_event); @@ -1890,8 +1890,8 @@ loop: os_event_set(srv_error_event); os_event_set(srv_monitor_event); os_event_set(srv_buf_dump_event); - if (lock_sys) { - os_event_set(lock_sys->timeout_event); + if (lock_sys.timeout_thread_active) { + os_event_set(lock_sys.timeout_event); } if (dict_stats_event) { os_event_set(dict_stats_event); @@ -1940,7 +1940,7 @@ loop: goto wait_suspend_loop; } else if (srv_dict_stats_thread_active) { thread_name = "dict_stats_thread"; - } else if (lock_sys && lock_sys->timeout_thread_active) { + } else if (lock_sys.timeout_thread_active) { thread_name = "lock_wait_timeout_thread"; } else if (srv_buf_dump_thread_active) { thread_name = "buf_dump_thread"; diff --git a/storage/innobase/srv/srv0mon.cc b/storage/innobase/srv/srv0mon.cc index 37e50722e5c..569e476969f 100644 --- a/storage/innobase/srv/srv0mon.cc +++ b/storage/innobase/srv/srv0mon.cc @@ -1933,7 +1933,7 @@ srv_mon_process_existing_counter( /* innodb_row_lock_time_max */ case MONITOR_OVLD_LOCK_MAX_WAIT_TIME: - value = lock_sys->n_lock_max_wait_time / 1000; + value = lock_sys.n_lock_max_wait_time / 1000; break; /* innodb_row_lock_time_avg */ diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 2b32deebb21..c94ff932651 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -1581,7 +1581,7 @@ srv_export_innodb_status(void) } export_vars.innodb_row_lock_time_max = - lock_sys->n_lock_max_wait_time / 1000; + lock_sys.n_lock_max_wait_time / 1000; export_vars.innodb_rows_read = srv_stats.n_rows_read; @@ -1717,7 +1717,7 @@ loop: if (srv_print_innodb_monitor) { /* Reset mutex_skipped counter everytime srv_print_innodb_monitor changes. This is to - ensure we will not be blocked by lock_sys->mutex + ensure we will not be blocked by lock_sys.mutex for short duration information printing, such as requested by sync_array_print_long_waits() */ if (!last_srv_print_monitor) { diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index f87a6303895..eb508409e58 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -1262,7 +1262,7 @@ srv_shutdown_all_bg_threads() if (srv_start_state_is_set(SRV_START_STATE_LOCK_SYS)) { /* a. 
Let the lock timeout thread exit */ - os_event_set(lock_sys->timeout_event); + os_event_set(lock_sys.timeout_event); } if (!srv_read_only_mode) { @@ -1853,7 +1853,7 @@ innobase_start_or_create_for_mysql() log_sys_init(); recv_sys_init(); - lock_sys_create(srv_lock_table_size); + lock_sys.create(srv_lock_table_size); /* Create i/o-handler threads: */ @@ -2565,7 +2565,7 @@ files_checked: lock_wait_timeout_thread, NULL, thread_ids + 2 + SRV_MAX_N_IO_THREADS); thread_started[2 + SRV_MAX_N_IO_THREADS] = true; - lock_sys->timeout_thread_active = true; + lock_sys.timeout_thread_active = true; /* Create the thread which warns of long semaphore waits */ srv_error_monitor_active = true; @@ -2836,7 +2836,7 @@ innodb_shutdown() ut_ad(trx_sys.is_initialised() || !srv_was_started); ut_ad(buf_dblwr || !srv_was_started || srv_read_only_mode || srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO); - ut_ad(lock_sys || !srv_was_started); + ut_ad(lock_sys.is_initialised() || !srv_was_started); #ifdef BTR_CUR_HASH_ADAPT ut_ad(btr_search_sys || !srv_was_started); #endif /* BTR_CUR_HASH_ADAPT */ @@ -2876,10 +2876,7 @@ innodb_shutdown() if (buf_dblwr) { buf_dblwr_free(); } - if (lock_sys) { - lock_sys_close(); - } - + lock_sys.close(); trx_pool_close(); /* We don't create these mutexes in RO mode because we don't create diff --git a/storage/innobase/sync/sync0debug.cc b/storage/innobase/sync/sync0debug.cc index 515c7256d18..75e6f0b39ca 100644 --- a/storage/innobase/sync/sync0debug.cc +++ b/storage/innobase/sync/sync0debug.cc @@ -811,7 +811,7 @@ LatchDebug::check_order( case SYNC_TRX: - /* Either the thread must own the lock_sys->mutex, or + /* Either the thread must own the lock_sys.mutex, or it is allowed to own only ONE trx_t::mutex. */ if (less(latches, level) != NULL) { diff --git a/storage/innobase/trx/trx0i_s.cc b/storage/innobase/trx/trx0i_s.cc index 92dab34d053..65036b9f05c 100644 --- a/storage/innobase/trx/trx0i_s.cc +++ b/storage/innobase/trx/trx0i_s.cc @@ -172,7 +172,7 @@ struct trx_i_s_cache_t { ha_storage_t* storage; /*!< storage for external volatile data that may become unavailable when we release - lock_sys->mutex or trx_sys.mutex */ + lock_sys.mutex or trx_sys.mutex */ ulint mem_allocd; /*!< the amount of memory allocated with mem_alloc*() */ bool is_truncated; /*!< this is true if the memory @@ -537,9 +537,9 @@ thd_done: row->trx_tables_locked = lock_number_of_tables_locked(&trx->lock); - /* These are protected by both trx->mutex or lock_sys->mutex, - or just lock_sys->mutex. For reading, it suffices to hold - lock_sys->mutex. */ + /* These are protected by both trx->mutex or lock_sys.mutex, + or just lock_sys.mutex. For reading, it suffices to hold + lock_sys.mutex. */ row->trx_lock_structs = UT_LIST_GET_LEN(trx->lock.trx_locks); diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index 463ff587483..c76fa683d62 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -2094,7 +2094,7 @@ state_ok: /**********************************************************************//** Prints info about a transaction. -The caller must hold lock_sys->mutex. +The caller must hold lock_sys.mutex. When possible, use trx_print() instead. */ void trx_print_latched( @@ -2114,7 +2114,7 @@ trx_print_latched( /**********************************************************************//** Prints info about a transaction. -Acquires and releases lock_sys->mutex. */ +Acquires and releases lock_sys.mutex. 
*/ void trx_print( /*======*/ @@ -2156,7 +2156,7 @@ trx_assert_started( /* trx->state can change from or to NOT_STARTED while we are holding trx_sys.mutex for non-locking autocommit selects but not for other types of transactions. It may change from ACTIVE to PREPARED. Unless - we are holding lock_sys->mutex, it may also change to COMMITTED. */ + we are holding lock_sys.mutex, it may also change to COMMITTED. */ switch (trx->state) { case TRX_STATE_PREPARED: @@ -2432,7 +2432,7 @@ static my_bool trx_get_trx_by_xid_callback(rw_trx_hash_element_t *element, /** Finds PREPARED XA transaction by xid. - trx may have been committed, unless the caller is holding lock_sys->mutex. + trx may have been committed, unless the caller is holding lock_sys.mutex. @param[in] xid X/Open XA transaction identifier |