path: root/storage/innobase/include/sync0rw.ic
author    Sergei Golubchik <vuvova@gmail.com>  2015-05-04 19:17:21 +0200
committer Sergei Golubchik <vuvova@gmail.com>  2015-05-04 19:17:21 +0200
commit    6d06fbbd1dc25b3c12568f9038060dfdb69f9683 (patch)
tree      21e27f3fddc89f9dda6b337091464ba10c490123 /storage/innobase/include/sync0rw.ic
parent    1645930d0bd02f79df3ebff412b90acdc15bd9a0 (diff)
download  mariadb-git-6d06fbbd1dc25b3c12568f9038060dfdb69f9683.tar.gz
move to storage/innobase
Diffstat (limited to 'storage/innobase/include/sync0rw.ic')
-rw-r--r--  storage/innobase/include/sync0rw.ic  797
1 file changed, 797 insertions, 0 deletions
diff --git a/storage/innobase/include/sync0rw.ic b/storage/innobase/include/sync0rw.ic
new file mode 100644
index 00000000000..bb05ae7daf1
--- /dev/null
+++ b/storage/innobase/include/sync0rw.ic
@@ -0,0 +1,797 @@
+/*****************************************************************************
+
+Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2008, Google Inc.
+
+Portions of this file contain modifications contributed and copyrighted by
+Google, Inc. Those modifications are gratefully acknowledged and are described
+briefly in the InnoDB documentation. The contributions by Google are
+incorporated with their permission, and subject to the conditions contained in
+the file COPYING.Google.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file include/sync0rw.ic
+The read-write lock (for threads)
+
+Created 9/11/1995 Heikki Tuuri
+*******************************************************/
+
+/******************************************************************//**
+Lock an rw-lock in shared mode for the current thread. If the rw-lock is
+locked in exclusive mode, or there is an exclusive lock request waiting,
+the function spins a preset time (controlled by SYNC_SPIN_ROUNDS),
+waiting for the lock before suspending the thread. */
+UNIV_INTERN
+void
+rw_lock_s_lock_spin(
+/*================*/
+ rw_lock_t* lock, /*!< in: pointer to rw-lock */
+ ulint pass, /*!< in: pass value; != 0, if the lock will
+ be passed to another thread to unlock */
+ const char* file_name,/*!< in: file name where lock requested */
+ ulint line); /*!< in: line where requested */
+#ifdef UNIV_SYNC_DEBUG
+/******************************************************************//**
+Inserts the debug information for an rw-lock. */
+UNIV_INTERN
+void
+rw_lock_add_debug_info(
+/*===================*/
+ rw_lock_t* lock, /*!< in: rw-lock */
+ ulint pass, /*!< in: pass value */
+ ulint lock_type, /*!< in: lock type */
+ const char* file_name, /*!< in: file where requested */
+ ulint line); /*!< in: line where requested */
+/******************************************************************//**
+Removes a debug information struct for an rw-lock. */
+UNIV_INTERN
+void
+rw_lock_remove_debug_info(
+/*======================*/
+ rw_lock_t* lock, /*!< in: rw-lock */
+ ulint pass, /*!< in: pass value */
+ ulint lock_type); /*!< in: lock type */
+#endif /* UNIV_SYNC_DEBUG */
+
+/********************************************************************//**
+Check if there are threads waiting for the rw-lock.
+@return 1 if waiters, 0 otherwise */
+UNIV_INLINE
+ulint
+rw_lock_get_waiters(
+/*================*/
+ const rw_lock_t* lock) /*!< in: rw-lock */
+{
+ return(lock->waiters);
+}
+
+/********************************************************************//**
+Sets lock->waiters to 1. It is not an error if lock->waiters is already
+1. On platforms where ATOMIC builtins are used this function enforces a
+memory barrier. */
+UNIV_INLINE
+void
+rw_lock_set_waiter_flag(
+/*====================*/
+ rw_lock_t* lock) /*!< in/out: rw-lock */
+{
+#ifdef INNODB_RW_LOCKS_USE_ATOMICS
+ (void) os_compare_and_swap_ulint(&lock->waiters, 0, 1);
+#else /* INNODB_RW_LOCKS_USE_ATOMICS */
+ lock->waiters = 1;
+ os_wmb;
+#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
+}
+
+/********************************************************************//**
+Resets lock->waiters to 0. It is not an error if lock->waiters is already
+0. On platforms where ATOMIC builtins are used this function enforces a
+memory barrier. */
+UNIV_INLINE
+void
+rw_lock_reset_waiter_flag(
+/*======================*/
+ rw_lock_t* lock) /*!< in/out: rw-lock */
+{
+#ifdef INNODB_RW_LOCKS_USE_ATOMICS
+ (void) os_compare_and_swap_ulint(&lock->waiters, 1, 0);
+#else /* INNODB_RW_LOCKS_USE_ATOMICS */
+ lock->waiters = 0;
+ os_wmb;
+#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
+}
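
A stand-alone sketch (editor's addition, not part of this commit) of the
waiter-flag pair above, assuming os_compare_and_swap_ulint() wraps GCC's
__sync_bool_compare_and_swap() builtin, as it typically does on GCC builds.
The CAS is used for its full memory barrier; losing the race to a concurrent
setter or resetter is harmless, so the result is discarded:

	static void rw_demo_set_flag(volatile unsigned long* waiters)
	{
		/* CAS 0 -> 1; a concurrent setter leaving it at 1 is fine */
		(void) __sync_bool_compare_and_swap(waiters, 0, 1);
	}

	static void rw_demo_reset_flag(volatile unsigned long* waiters)
	{
		/* CAS 1 -> 0; a concurrent resetter leaving it at 0 is fine */
		(void) __sync_bool_compare_and_swap(waiters, 1, 0);
	}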
+
+/******************************************************************//**
+Returns the write-status of the lock - this function made more sense
+with the old rw_lock implementation.
+@return RW_LOCK_NOT_LOCKED, RW_LOCK_EX, RW_LOCK_WAIT_EX */
+UNIV_INLINE
+ulint
+rw_lock_get_writer(
+/*===============*/
+ const rw_lock_t* lock) /*!< in: rw-lock */
+{
+ lint lock_word = lock->lock_word;
+ if (lock_word > 0) {
+ /* return NOT_LOCKED in s-lock state, like the writer
+ member of the old lock implementation. */
+ return(RW_LOCK_NOT_LOCKED);
+ } else if ((lock_word == 0) || (lock_word <= -X_LOCK_DECR)) {
+ return(RW_LOCK_EX);
+ } else {
+ ut_ad(lock_word > -X_LOCK_DECR);
+ return(RW_LOCK_WAIT_EX);
+ }
+}
+
+/******************************************************************//**
+Returns the number of readers.
+@return number of readers */
+UNIV_INLINE
+ulint
+rw_lock_get_reader_count(
+/*=====================*/
+ const rw_lock_t* lock) /*!< in: rw-lock */
+{
+ lint lock_word = lock->lock_word;
+ if (lock_word > 0) {
+ /* s-locked, no x-waiters */
+ return(X_LOCK_DECR - lock_word);
+ } else if (lock_word < 0 && lock_word > -X_LOCK_DECR) {
+ /* s-locked, with x-waiters */
+ return((ulint)(-lock_word));
+ }
+ return(0);
+}
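
The two accessors above decode the single lock_word field that encodes the
whole lock state. A reference sketch (editor's addition, not part of this
commit), assuming X_LOCK_DECR is 0x00100000 as in this version's sync0rw.h;
the _demo names are hypothetical:

	#include <stdio.h>

	#define X_LOCK_DECR_DEMO 0x00100000L

	/* mirrors rw_lock_get_writer(): decode lock_word into a state */
	static const char* rw_demo_state(long lock_word)
	{
		if (lock_word == X_LOCK_DECR_DEMO) {
			return("unlocked");
		} else if (lock_word > 0) {
			/* readers = X_LOCK_DECR - lock_word */
			return("s-locked, no x-waiter");
		} else if (lock_word == 0 || lock_word <= -X_LOCK_DECR_DEMO) {
			return("x-locked (possibly recursively)");
		} else {
			/* -X_LOCK_DECR < lock_word < 0: readers = -lock_word */
			return("s-locked, x-lock waiting");
		}
	}

	int main(void)
	{
		printf("%s\n", rw_demo_state(X_LOCK_DECR_DEMO));     /* unlocked */
		printf("%s\n", rw_demo_state(X_LOCK_DECR_DEMO - 3)); /* 3 readers */
		printf("%s\n", rw_demo_state(0));                    /* one x-lock */
		return(0);
	}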
+
+#ifndef INNODB_RW_LOCKS_USE_ATOMICS
+UNIV_INLINE
+ib_mutex_t*
+rw_lock_get_mutex(
+/*==============*/
+ rw_lock_t* lock)
+{
+ return(&(lock->mutex));
+}
+#endif
+
+/******************************************************************//**
+Returns the value of writer_count for the lock. Does not reserve the lock
+mutex, so the caller must be sure it is not changed during the call.
+@return value of writer_count */
+UNIV_INLINE
+ulint
+rw_lock_get_x_lock_count(
+/*=====================*/
+ const rw_lock_t* lock) /*!< in: rw-lock */
+{
+ lint lock_copy = lock->lock_word;
+ if ((lock_copy != 0) && (lock_copy > -X_LOCK_DECR)) {
+ return(0);
+ }
+ return((lock_copy == 0) ? 1 : (2 - (lock_copy + X_LOCK_DECR)));
+}
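
A worked example (editor's addition, not part of this commit) of the formula
above, using the relock rule of rw_lock_x_lock_func_nowait() below (the first
relock moves lock_word from 0 to -X_LOCK_DECR, each further relock subtracts
1):

	lock_word == 0                  count = 1
	lock_word == -X_LOCK_DECR       count = 2 - 0    = 2
	lock_word == -X_LOCK_DECR - 1   count = 2 - (-1) = 3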
+
+/******************************************************************//**
+Two different implementations for decrementing the lock_word of a rw_lock:
+one for systems supporting atomic operations, one for others. This does
+not support recursive x-locks: they should be handled by the caller and
+need not be atomic since they are performed by the current lock holder.
+@return TRUE if the decrement was made, FALSE if not */
+UNIV_INLINE
+ibool
+rw_lock_lock_word_decr(
+/*===================*/
+ rw_lock_t* lock, /*!< in/out: rw-lock */
+ ulint amount) /*!< in: amount to decrement */
+{
+#ifdef INNODB_RW_LOCKS_USE_ATOMICS
+ lint local_lock_word;
+
+ os_rmb;
+ local_lock_word = lock->lock_word;
+ while (local_lock_word > 0) {
+ if (os_compare_and_swap_lint(&lock->lock_word,
+ local_lock_word,
+ local_lock_word - amount)) {
+ return(TRUE);
+ }
+ local_lock_word = lock->lock_word;
+ }
+ return(FALSE);
+#else /* INNODB_RW_LOCKS_USE_ATOMICS */
+ ibool success = FALSE;
+ mutex_enter(&(lock->mutex));
+ if (lock->lock_word > 0) {
+ lock->lock_word -= amount;
+ success = TRUE;
+ }
+ mutex_exit(&(lock->mutex));
+ return(success);
+#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
+}
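
The atomic branch above is a classic compare-and-swap retry loop. A minimal
stand-alone version (editor's addition, not part of this commit), again
assuming os_compare_and_swap_lint() wraps GCC's
__sync_bool_compare_and_swap():

	static int rw_demo_word_decr(volatile long* word, long amount)
	{
		long	old_word = *word;

		while (old_word > 0) {
			if (__sync_bool_compare_and_swap(word, old_word,
							 old_word - amount)) {
				return(1); /* decrement applied atomically */
			}
			old_word = *word; /* lost the race: reload and retry */
		}
		return(0); /* word <= 0: an x-lock holder or waiter exists */
	}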
+
+/******************************************************************//**
+Increments lock_word the specified amount and returns new value.
+@return lock->lock_word after increment */
+UNIV_INLINE
+lint
+rw_lock_lock_word_incr(
+/*===================*/
+ rw_lock_t* lock, /*!< in/out: rw-lock */
+ ulint amount) /*!< in: amount of increment */
+{
+#ifdef INNODB_RW_LOCKS_USE_ATOMICS
+ return(os_atomic_increment_lint(&lock->lock_word, amount));
+#else /* INNODB_RW_LOCKS_USE_ATOMICS */
+ lint local_lock_word;
+
+ mutex_enter(&(lock->mutex));
+
+ lock->lock_word += amount;
+ local_lock_word = lock->lock_word;
+
+ mutex_exit(&(lock->mutex));
+
+ return(local_lock_word);
+#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
+}
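
A stand-alone equivalent (editor's addition, not part of this commit) of the
atomic branch above, assuming os_atomic_increment_lint() wraps GCC's
__sync_add_and_fetch(), i.e. an atomic add that returns the new value; the
unlock paths below compare that return value against 0 and X_LOCK_DECR:

	static long rw_demo_word_incr(volatile long* word, long amount)
	{
		/* atomically add and return the post-increment value */
		return(__sync_add_and_fetch(word, amount));
	}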
+
+/******************************************************************//**
+This function sets the lock->writer_thread and lock->recursive fields.
+For platforms where we are using atomic builtins instead of lock->mutex
+it sets the lock->writer_thread field using atomics to ensure memory
+ordering. Note that it is assumed that the caller of this function
+effectively owns the lock, i.e. nobody else is allowed to modify
+lock->writer_thread at this point in time.
+The protocol is that lock->writer_thread MUST be updated BEFORE the
+lock->recursive flag is set. */
+UNIV_INLINE
+void
+rw_lock_set_writer_id_and_recursion_flag(
+/*=====================================*/
+ rw_lock_t* lock, /*!< in/out: lock to work on */
+ ibool recursive) /*!< in: TRUE if recursion
+ allowed */
+{
+ os_thread_id_t curr_thread = os_thread_get_curr_id();
+
+#ifdef INNODB_RW_LOCKS_USE_ATOMICS
+ os_thread_id_t local_thread;
+ ibool success;
+
+ /* Prevent Valgrind warnings about writer_thread being
+ uninitialized. It does not matter if writer_thread is
+ uninitialized, because we are comparing writer_thread against
+ itself, and the operation should always succeed. */
+ UNIV_MEM_VALID(&lock->writer_thread, sizeof lock->writer_thread);
+
+ local_thread = lock->writer_thread;
+ success = os_compare_and_swap_thread_id(
+ &lock->writer_thread, local_thread, curr_thread);
+ ut_a(success);
+ lock->recursive = recursive;
+
+#else /* INNODB_RW_LOCKS_USE_ATOMICS */
+
+ mutex_enter(&lock->mutex);
+ lock->writer_thread = curr_thread;
+ lock->recursive = recursive;
+ mutex_exit(&lock->mutex);
+
+#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
+}
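
A stand-alone model (editor's addition, not part of this commit) of the
publish order this function must guarantee: writer_thread becomes globally
visible before recursive, so any thread that observes recursive == TRUE also
observes the correct owner. The CAS above provides the barrier; here an
explicit one stands in for it:

	static void rw_demo_publish_writer(volatile unsigned long* writer_thread,
					   volatile int* recursive,
					   unsigned long me)
	{
		*writer_thread = me;   /* step 1: record the owner */
		__sync_synchronize();  /* full barrier: step 1 before step 2 */
		*recursive = 1;        /* step 2: mark writer_thread as valid */
	}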
+
+/******************************************************************//**
+Low-level function which tries to lock an rw-lock in s-mode. Performs no
+spinning.
+@return TRUE if success */
+UNIV_INLINE
+ibool
+rw_lock_s_lock_low(
+/*===============*/
+ rw_lock_t* lock, /*!< in: pointer to rw-lock */
+ ulint pass __attribute__((unused)),
+ /*!< in: pass value; != 0, if the lock will be
+ passed to another thread to unlock */
+ const char* file_name, /*!< in: file name where lock requested */
+ ulint line) /*!< in: line where requested */
+{
+ if (!rw_lock_lock_word_decr(lock, 1)) {
+ /* Locking did not succeed */
+ return(FALSE);
+ }
+
+#ifdef UNIV_SYNC_DEBUG
+ rw_lock_add_debug_info(lock, pass, RW_LOCK_SHARED, file_name, line);
+#endif
+ /* These debugging values are not set safely: they may be incorrect
+ or even refer to a line that is invalid for the file name. */
+ lock->last_s_file_name = file_name;
+ lock->last_s_line = line;
+
+ return(TRUE); /* locking succeeded */
+}
+
+/******************************************************************//**
+NOTE! Use the corresponding macro, not directly this function! Lock an
+rw-lock in shared mode for the current thread. If the rw-lock is locked
+in exclusive mode, or there is an exclusive lock request waiting, the
+function spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting for
+the lock, before suspending the thread. */
+UNIV_INLINE
+void
+rw_lock_s_lock_func(
+/*================*/
+ rw_lock_t* lock, /*!< in: pointer to rw-lock */
+ ulint pass, /*!< in: pass value; != 0, if the lock will
+ be passed to another thread to unlock */
+ const char* file_name,/*!< in: file name where lock requested */
+ ulint line) /*!< in: line where requested */
+{
+	/* NOTE: As we do not know the thread ids of threads which have
+	s-locked a latch, and s-lockers will be served only after waiting
+	x-lock requests have been fulfilled, a thread that already owns
+	an s-lock here may deadlock with another thread requesting an
+	x-lock. We therefore forbid recursive s-locking of a latch: the
+	assert below warns the programmer of the possibility of such a
+	deadlock. To implement safe recursive s-locking we would have to
+	keep a list of the thread ids that have s-locked a latch, which
+	would cost some CPU time. */
+
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(!rw_lock_own(lock, RW_LOCK_SHARED)); /* see NOTE above */
+ ut_ad(!rw_lock_own(lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+
+ if (rw_lock_s_lock_low(lock, pass, file_name, line)) {
+
+ return; /* Success */
+ } else {
+ /* Did not succeed, try spin wait */
+
+ rw_lock_s_lock_spin(lock, pass, file_name, line);
+
+ return;
+ }
+}
+
+/******************************************************************//**
+NOTE! Use the corresponding macro, not directly this function! Lock an
+rw-lock in exclusive mode for the current thread if the lock can be
+obtained immediately.
+@return TRUE if success */
+UNIV_INLINE
+ibool
+rw_lock_x_lock_func_nowait(
+/*=======================*/
+ rw_lock_t* lock, /*!< in: pointer to rw-lock */
+ const char* file_name,/*!< in: file name where lock requested */
+ ulint line) /*!< in: line where requested */
+{
+ ibool success;
+
+#ifdef INNODB_RW_LOCKS_USE_ATOMICS
+ success = os_compare_and_swap_lint(&lock->lock_word, X_LOCK_DECR, 0);
+#else
+
+ success = FALSE;
+ mutex_enter(&(lock->mutex));
+ if (lock->lock_word == X_LOCK_DECR) {
+ lock->lock_word = 0;
+ success = TRUE;
+ }
+ mutex_exit(&(lock->mutex));
+
+#endif
+ if (success) {
+ rw_lock_set_writer_id_and_recursion_flag(lock, TRUE);
+
+ } else if (lock->recursive
+ && os_thread_eq(lock->writer_thread,
+ os_thread_get_curr_id())) {
+ /* Relock: this lock_word modification is safe since no other
+ threads can modify (lock, unlock, or reserve) lock_word while
+ there is an exclusive writer and this is the writer thread. */
+ if (lock->lock_word == 0) {
+ lock->lock_word = -X_LOCK_DECR;
+ } else {
+ lock->lock_word--;
+ }
+
+ /* Watch for too many recursive locks */
+ ut_ad(lock->lock_word < 0);
+
+ } else {
+ /* Failure */
+ return(FALSE);
+ }
+#ifdef UNIV_SYNC_DEBUG
+ rw_lock_add_debug_info(lock, 0, RW_LOCK_EX, file_name, line);
+#endif
+
+ lock->last_x_file_name = file_name;
+ lock->last_x_line = line;
+
+ ut_ad(rw_lock_validate(lock));
+
+ return(TRUE);
+}
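
A usage sketch (editor's addition, not part of this commit): callers
typically reach this function through the rw_lock_x_lock_nowait() macro,
which supplies __FILE__ and __LINE__, and treat it as a try-lock:

	if (rw_lock_x_lock_nowait(lock)) {
		/* got the x-lock immediately: mutate the protected data */
		rw_lock_x_unlock(lock);
	} else {
		/* lock busy: retry later or take the blocking path */
	}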
+
+/******************************************************************//**
+Releases a shared mode lock. */
+UNIV_INLINE
+void
+rw_lock_s_unlock_func(
+/*==================*/
+#ifdef UNIV_SYNC_DEBUG
+ ulint pass, /*!< in: pass value; != 0, if the lock may have
+ been passed to another thread to unlock */
+#endif
+ rw_lock_t* lock) /*!< in/out: rw-lock */
+{
+ ut_ad(lock->lock_word > -X_LOCK_DECR);
+ ut_ad(lock->lock_word != 0);
+ ut_ad(lock->lock_word < X_LOCK_DECR);
+
+#ifdef UNIV_SYNC_DEBUG
+ rw_lock_remove_debug_info(lock, pass, RW_LOCK_SHARED);
+#endif
+
+ /* Increment lock_word to indicate 1 less reader */
+ if (rw_lock_lock_word_incr(lock, 1) == 0) {
+
+ /* wait_ex waiter exists. It may not be asleep, but we signal
+ anyway. We do not wake other waiters, because they can't
+ exist without wait_ex waiter and wait_ex waiter goes first.*/
+ os_event_set(lock->wait_ex_event);
+ sync_array_object_signalled();
+
+ }
+
+ ut_ad(rw_lock_validate(lock));
+
+#ifdef UNIV_SYNC_PERF_STAT
+ rw_s_exit_count++;
+#endif
+}
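
A worked example (editor's addition, not part of this commit) of the
"increment reached 0" test above: with two readers holding the lock
(lock_word == X_LOCK_DECR - 2), a writer reserving it subtracts X_LOCK_DECR,
leaving -2, and then waits on wait_ex_event. The readers' unlocks add 1 each,
-2 -> -1 -> 0; the unlock that lands on 0 is the last reader out, so it is
the one that must signal the waiting writer.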
+
+/******************************************************************//**
+Releases an exclusive mode lock. */
+UNIV_INLINE
+void
+rw_lock_x_unlock_func(
+/*==================*/
+#ifdef UNIV_SYNC_DEBUG
+ ulint pass, /*!< in: pass value; != 0, if the lock may have
+ been passed to another thread to unlock */
+#endif
+ rw_lock_t* lock) /*!< in/out: rw-lock */
+{
+ ut_ad(lock->lock_word == 0 || lock->lock_word <= -X_LOCK_DECR);
+
+ /* lock->recursive flag also indicates if lock->writer_thread is
+ valid or stale. If we are the last of the recursive callers
+ then we must unset lock->recursive flag to indicate that the
+ lock->writer_thread is now stale.
+ Note that since we still hold the x-lock we can safely read the
+ lock_word. */
+ if (lock->lock_word == 0) {
+ /* Last caller in a possible recursive chain. */
+ lock->recursive = FALSE;
+ }
+
+#ifdef UNIV_SYNC_DEBUG
+ rw_lock_remove_debug_info(lock, pass, RW_LOCK_EX);
+#endif
+
+ ulint x_lock_incr;
+ if (lock->lock_word == 0) {
+ x_lock_incr = X_LOCK_DECR;
+ } else if (lock->lock_word == -X_LOCK_DECR) {
+ x_lock_incr = X_LOCK_DECR;
+ } else {
+ ut_ad(lock->lock_word < -X_LOCK_DECR);
+ x_lock_incr = 1;
+ }
+
+ if (rw_lock_lock_word_incr(lock, x_lock_incr) == X_LOCK_DECR) {
+ /* Lock is now free. May have to signal read/write waiters.
+ We do not need to signal wait_ex waiters, since they cannot
+ exist when there is a writer. */
+ if (lock->waiters) {
+ rw_lock_reset_waiter_flag(lock);
+ os_event_set(lock->event);
+ sync_array_object_signalled();
+ }
+ }
+
+ ut_ad(rw_lock_validate(lock));
+
+#ifdef UNIV_SYNC_PERF_STAT
+ rw_x_exit_count++;
+#endif
+}
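
A worked example (editor's addition, not part of this commit) showing how the
increment chosen above undoes the relock rule of rw_lock_x_lock_func_nowait():

	lock_word == 0 (one x-lock)         +X_LOCK_DECR -> X_LOCK_DECR, lock free
	lock_word == -X_LOCK_DECR (two)     +X_LOCK_DECR -> 0, one x-lock remains
	lock_word < -X_LOCK_DECR (three+)   +1, one recursion level popped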
+
+#ifdef UNIV_PFS_RWLOCK
+
+/******************************************************************//**
+Performance schema instrumented wrap function for rw_lock_create_func().
+NOTE! Please use the corresponding macro rw_lock_create(), not directly
+this function! */
+UNIV_INLINE
+void
+pfs_rw_lock_create_func(
+/*====================*/
+ mysql_pfs_key_t key, /*!< in: key registered with
+ performance schema */
+ rw_lock_t* lock, /*!< in: pointer to memory */
+# ifdef UNIV_DEBUG
+# ifdef UNIV_SYNC_DEBUG
+ ulint level, /*!< in: level */
+# endif /* UNIV_SYNC_DEBUG */
+ const char* cmutex_name, /*!< in: mutex name */
+# endif /* UNIV_DEBUG */
+ const char* cfile_name, /*!< in: file name where created */
+ ulint cline) /*!< in: file line where created */
+{
+ /* Initialize the rwlock for performance schema */
+ lock->pfs_psi = PSI_RWLOCK_CALL(init_rwlock)(key, lock);
+
+ /* The actual function to initialize an rwlock */
+ rw_lock_create_func(lock,
+# ifdef UNIV_DEBUG
+# ifdef UNIV_SYNC_DEBUG
+ level,
+# endif /* UNIV_SYNC_DEBUG */
+ cmutex_name,
+# endif /* UNIV_DEBUG */
+ cfile_name,
+ cline);
+}
+/******************************************************************//**
+Performance schema instrumented wrap function for rw_lock_x_lock_func()
+NOTE! Please use the corresponding macro rw_lock_x_lock(), not directly
+this function! */
+UNIV_INLINE
+void
+pfs_rw_lock_x_lock_func(
+/*====================*/
+ rw_lock_t* lock, /*!< in: pointer to rw-lock */
+ ulint pass, /*!< in: pass value; != 0, if the lock will
+ be passed to another thread to unlock */
+ const char* file_name,/*!< in: file name where lock requested */
+ ulint line) /*!< in: line where requested */
+{
+ if (lock->pfs_psi != NULL)
+ {
+ PSI_rwlock_locker* locker;
+ PSI_rwlock_locker_state state;
+
+ /* Record the entry of rw x lock request in performance schema */
+ locker = PSI_RWLOCK_CALL(start_rwlock_wrwait)(
+ &state, lock->pfs_psi, PSI_RWLOCK_WRITELOCK,
+ file_name, static_cast<uint>(line));
+
+ rw_lock_x_lock_func(
+ lock, pass, file_name, static_cast<uint>(line));
+
+ if (locker != NULL) {
+ PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, 0);
+ }
+ }
+ else
+ {
+ rw_lock_x_lock_func(lock, pass, file_name, line);
+ }
+}
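
Every pfs_ wrapper in this block follows the same begin/operation/end
Performance Schema pattern; a schematic sketch (editor's addition, not part
of this commit; begin_wait/end_wait are placeholder names, not the real PSI
API):

	locker = begin_wait(&state, psi, operation, file, line); /* may be NULL */
	result = do_real_lock_operation(lock);
	if (locker != NULL) {
		end_wait(locker, result); /* record wait time and outcome */
	}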
+/******************************************************************//**
+Performance schema instrumented wrap function for
+rw_lock_x_lock_func_nowait()
+NOTE! Please use the corresponding macro rw_lock_x_lock_nowait(),
+not directly this function!
+@return TRUE if success */
+UNIV_INLINE
+ibool
+pfs_rw_lock_x_lock_func_nowait(
+/*===========================*/
+ rw_lock_t* lock, /*!< in: pointer to rw-lock */
+ const char* file_name,/*!< in: file name where lock
+ requested */
+ ulint line) /*!< in: line where requested */
+{
+ ibool ret;
+
+ if (lock->pfs_psi != NULL)
+ {
+ PSI_rwlock_locker* locker;
+ PSI_rwlock_locker_state state;
+
+ /* Record the entry of rw x lock request in performance schema */
+ locker = PSI_RWLOCK_CALL(start_rwlock_wrwait)(
+ &state, lock->pfs_psi, PSI_RWLOCK_WRITELOCK,
+ file_name, static_cast<uint>(line));
+
+ ret = rw_lock_x_lock_func_nowait(lock, file_name, line);
+
+ if (locker != NULL) {
+ PSI_RWLOCK_CALL(end_rwlock_wrwait)(
+ locker, static_cast<int>(ret));
+ }
+ }
+ else
+ {
+ ret = rw_lock_x_lock_func_nowait(lock, file_name, line);
+ }
+
+ return(ret);
+}
+/******************************************************************//**
+Performance schema instrumented wrap function for rw_lock_free_func()
+NOTE! Please use the corresponding macro rw_lock_free(), not directly
+this function! */
+UNIV_INLINE
+void
+pfs_rw_lock_free_func(
+/*==================*/
+ rw_lock_t* lock) /*!< in: pointer to rw-lock */
+{
+ if (lock->pfs_psi != NULL)
+ {
+ PSI_RWLOCK_CALL(destroy_rwlock)(lock->pfs_psi);
+ lock->pfs_psi = NULL;
+ }
+
+ rw_lock_free_func(lock);
+}
+/******************************************************************//**
+Performance schema instrumented wrap function for rw_lock_s_lock_func()
+NOTE! Please use the corresponding macro rw_lock_s_lock(), not
+directly this function! */
+UNIV_INLINE
+void
+pfs_rw_lock_s_lock_func(
+/*====================*/
+ rw_lock_t* lock, /*!< in: pointer to rw-lock */
+ ulint pass, /*!< in: pass value; != 0, if the
+ lock will be passed to another
+ thread to unlock */
+ const char* file_name,/*!< in: file name where lock
+ requested */
+ ulint line) /*!< in: line where requested */
+{
+ if (lock->pfs_psi != NULL)
+ {
+ PSI_rwlock_locker* locker;
+ PSI_rwlock_locker_state state;
+
+		/* Instrumented to inform we are acquiring a shared rwlock */
+ locker = PSI_RWLOCK_CALL(start_rwlock_rdwait)(
+ &state, lock->pfs_psi, PSI_RWLOCK_READLOCK,
+ file_name, static_cast<uint>(line));
+
+ rw_lock_s_lock_func(lock, pass, file_name, line);
+
+ if (locker != NULL) {
+ PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
+ }
+ }
+ else
+ {
+ rw_lock_s_lock_func(lock, pass, file_name, line);
+ }
+
+ return;
+}
+/******************************************************************//**
+Performance schema instrumented wrap function for rw_lock_s_lock_func()
+NOTE! Please use the corresponding macro rw_lock_s_lock(), not
+directly this function!
+@return TRUE if success */
+UNIV_INLINE
+ibool
+pfs_rw_lock_s_lock_low(
+/*===================*/
+ rw_lock_t* lock, /*!< in: pointer to rw-lock */
+ ulint pass, /*!< in: pass value; != 0, if the
+ lock will be passed to another
+ thread to unlock */
+ const char* file_name, /*!< in: file name where lock requested */
+ ulint line) /*!< in: line where requested */
+{
+ ibool ret;
+
+ if (lock->pfs_psi != NULL)
+ {
+ PSI_rwlock_locker* locker;
+ PSI_rwlock_locker_state state;
+
+		/* Instrumented to inform we are acquiring a shared rwlock */
+ locker = PSI_RWLOCK_CALL(start_rwlock_rdwait)(
+ &state, lock->pfs_psi, PSI_RWLOCK_READLOCK,
+ file_name, static_cast<uint>(line));
+
+ ret = rw_lock_s_lock_low(lock, pass, file_name, line);
+
+ if (locker != NULL) {
+ PSI_RWLOCK_CALL(end_rwlock_rdwait)(
+ locker, static_cast<int>(ret));
+ }
+ }
+ else
+ {
+ ret = rw_lock_s_lock_low(lock, pass, file_name, line);
+ }
+
+ return(ret);
+}
+
+/******************************************************************//**
+Performance schema instrumented wrap function for rw_lock_x_unlock_func()
+NOTE! Please use the corresponding macro rw_lock_x_unlock(), not directly
+this function! */
+UNIV_INLINE
+void
+pfs_rw_lock_x_unlock_func(
+/*======================*/
+#ifdef UNIV_SYNC_DEBUG
+ ulint pass, /*!< in: pass value; != 0, if the
+ lock may have been passed to another
+ thread to unlock */
+#endif
+ rw_lock_t* lock) /*!< in/out: rw-lock */
+{
+ /* Inform performance schema we are unlocking the lock */
+ if (lock->pfs_psi != NULL)
+ PSI_RWLOCK_CALL(unlock_rwlock)(lock->pfs_psi);
+
+ rw_lock_x_unlock_func(
+#ifdef UNIV_SYNC_DEBUG
+ pass,
+#endif
+ lock);
+}
+
+/******************************************************************//**
+Performance schema instrumented wrap function for rw_lock_s_unlock_func()
+NOTE! Please use the corresponding macro rw_lock_s_unlock(), not
+directly this function! */
+UNIV_INLINE
+void
+pfs_rw_lock_s_unlock_func(
+/*======================*/
+#ifdef UNIV_SYNC_DEBUG
+ ulint pass, /*!< in: pass value; != 0, if the
+ lock may have been passed to another
+ thread to unlock */
+#endif
+ rw_lock_t* lock) /*!< in/out: rw-lock */
+{
+ /* Inform performance schema we are unlocking the lock */
+ if (lock->pfs_psi != NULL)
+ PSI_RWLOCK_CALL(unlock_rwlock)(lock->pfs_psi);
+
+ rw_lock_s_unlock_func(
+#ifdef UNIV_SYNC_DEBUG
+ pass,
+#endif
+ lock);
+}
+#endif /* UNIV_PFS_RWLOCK */