Diffstat (limited to 'storage/xtradb/include/sync0sync.ic')
-rw-r--r-- | storage/xtradb/include/sync0sync.ic | 270
1 file changed, 270 insertions, 0 deletions
diff --git a/storage/xtradb/include/sync0sync.ic b/storage/xtradb/include/sync0sync.ic
new file mode 100644
index 00000000000..c43121ebd0b
--- /dev/null
+++ b/storage/xtradb/include/sync0sync.ic
@@ -0,0 +1,270 @@
+/*****************************************************************************
+
+Copyright (c) 1995, 2009, Innobase Oy. All Rights Reserved.
+Copyright (c) 2008, Google Inc.
+
+Portions of this file contain modifications contributed and copyrighted by
+Google, Inc. Those modifications are gratefully acknowledged and are described
+briefly in the InnoDB documentation. The contributions by Google are
+incorporated with their permission, and subject to the conditions contained in
+the file COPYING.Google.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+Place, Suite 330, Boston, MA 02111-1307 USA
+
+*****************************************************************************/
+
+/******************************************************
+Mutex, the basic synchronization primitive
+
+Created 9/5/1995 Heikki Tuuri
+*******************************************************/
+
+/**********************************************************************
+Sets the waiters field in a mutex. */
+UNIV_INTERN
+void
+mutex_set_waiters(
+/*==============*/
+        mutex_t*        mutex,  /* in: mutex */
+        ulint           n);     /* in: value to set */
+/**********************************************************************
+Reserves a mutex for the current thread. If the mutex is reserved, the
+function spins a preset time (controlled by SYNC_SPIN_ROUNDS) waiting
+for the mutex before suspending the thread. */
+UNIV_INTERN
+void
+mutex_spin_wait(
+/*============*/
+        mutex_t*        mutex,          /* in: pointer to mutex */
+        const char*     file_name,      /* in: file name where mutex
+                                        requested */
+        ulint           line);          /* in: line where requested */
+#ifdef UNIV_SYNC_DEBUG
+/**********************************************************************
+Sets the debug information for a reserved mutex. */
+UNIV_INTERN
+void
+mutex_set_debug_info(
+/*=================*/
+        mutex_t*        mutex,          /* in: mutex */
+        const char*     file_name,      /* in: file where requested */
+        ulint           line);          /* in: line where requested */
+#endif /* UNIV_SYNC_DEBUG */
+/**********************************************************************
+Releases the threads waiting in the primary wait array for this mutex. */
+UNIV_INTERN
+void
+mutex_signal_object(
+/*================*/
+        mutex_t*        mutex); /* in: mutex */
+
+/**********************************************************************
+Performs an atomic test-and-set instruction to the lock_word field of a
+mutex. */
+UNIV_INLINE
+byte
+mutex_test_and_set(
+/*===============*/
+                        /* out: the previous value of lock_word: 0 or
+                        1 */
+        mutex_t*        mutex)  /* in: mutex */
+{
+#if defined(_WIN32) && defined(UNIV_CAN_USE_X86_ASSEMBLER)
+        byte    res;
+        byte*   lw;             /* assembler code is used to ensure that
+                                lock_word is loaded from memory */
+        ut_ad(mutex);
+        ut_ad(sizeof(byte) == 1);
+
+        lw = &(mutex->lock_word);
+
+        __asm   MOV     ECX, lw
+        __asm   MOV     EDX, 1
+        __asm   XCHG    DL, BYTE PTR [ECX]
+        __asm   MOV     res, DL
+
+        /* The fence below would prevent this thread from
+        reading the data structure protected by the mutex
+        before the test-and-set operation is committed, but
+        the fence is apparently not needed:
+
+        In a posting to comp.arch newsgroup (August 10, 1997)
+        Andy Glew said that in P6 a LOCKed instruction like
+        XCHG establishes a fence with respect to memory reads
+        and writes and thus an explicit fence is not
+        needed. In P5 he seemed to agree with a previous
+        newsgroup poster that LOCKed instructions serialize
+        all instruction execution, and, consequently, also
+        memory operations. This is confirmed in Intel Software
+        Dev. Manual, Vol. 3. */
+
+        /* mutex_fence(); */
+
+        return(res);
+#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
+        return __sync_lock_test_and_set(&(mutex->lock_word), 1);
+#else
+        ibool   ret;
+
+        ret = os_fast_mutex_trylock(&(mutex->os_fast_mutex));
+
+        if (ret == 0) {
+                /* We check that os_fast_mutex_trylock does not leak
+                and allow race conditions */
+                ut_a(mutex->lock_word == 0);
+
+                mutex->lock_word = 1;
+        }
+
+        return((byte)ret);
+#endif
+}
+
+/**********************************************************************
+Performs a reset instruction to the lock_word field of a mutex. This
+instruction also serializes memory operations to the program order. */
+UNIV_INLINE
+void
+mutex_reset_lock_word(
+/*==================*/
+        mutex_t*        mutex)  /* in: mutex */
+{
+#if defined(_WIN32) && defined(UNIV_CAN_USE_X86_ASSEMBLER)
+        byte*   lw;             /* assembler code is used to ensure that
+                                lock_word is loaded from memory */
+        ut_ad(mutex);
+
+        lw = &(mutex->lock_word);
+
+        __asm   MOV     EDX, 0
+        __asm   MOV     ECX, lw
+        __asm   XCHG    DL, BYTE PTR [ECX]
+#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
+        /* In theory __sync_lock_release should be used to release the lock.
+        Unfortunately, it does not work properly alone. The workaround is
+        that more conservative __sync_lock_test_and_set is used instead. */
+        __sync_lock_test_and_set(&(mutex->lock_word), 0);
+#else
+        mutex->lock_word = 0;
+
+        os_fast_mutex_unlock(&(mutex->os_fast_mutex));
+#endif
+}
+
+/**********************************************************************
+Gets the value of the lock word. */
+UNIV_INLINE
+byte
+mutex_get_lock_word(
+/*================*/
+        const mutex_t*  mutex)  /* in: mutex */
+{
+        const volatile byte*    ptr;    /* declared volatile to ensure that
+                                        lock_word is loaded from memory */
+        ut_ad(mutex);
+
+        ptr = &(mutex->lock_word);
+
+        return(*ptr);
+}
+
+/**********************************************************************
+Gets the waiters field in a mutex. */
+UNIV_INLINE
+ulint
+mutex_get_waiters(
+/*==============*/
+                                /* out: value to set */
+        const mutex_t*  mutex)  /* in: mutex */
+{
+        const volatile ulint*   ptr;    /* declared volatile to ensure that
+                                        the value is read from memory */
+        ut_ad(mutex);
+
+        ptr = &(mutex->waiters);
+
+        return(*ptr);           /* Here we assume that the read of a single
+                                word from memory is atomic */
+}
+
+/**********************************************************************
+Unlocks a mutex owned by the current thread. */
+UNIV_INLINE
+void
+mutex_exit(
+/*=======*/
+        mutex_t*        mutex)  /* in: pointer to mutex */
+{
+        ut_ad(mutex_own(mutex));
+
+        ut_d(mutex->thread_id = (os_thread_id_t) ULINT_UNDEFINED);
+
+#ifdef UNIV_SYNC_DEBUG
+        sync_thread_reset_level(mutex);
+#endif
+        mutex_reset_lock_word(mutex);
+
+        /* A problem: we assume that mutex_reset_lock word
+        is a memory barrier, that is when we read the waiters
+        field next, the read must be serialized in memory
+        after the reset. A speculative processor might
+        perform the read first, which could leave a waiting
+        thread hanging indefinitely.
+
+        Our current solution call every second
+        sync_arr_wake_threads_if_sema_free()
+        to wake up possible hanging threads if
+        they are missed in mutex_signal_object. */
+
+        if (mutex_get_waiters(mutex) != 0) {
+
+                mutex_signal_object(mutex);
+        }
+
+#ifdef UNIV_SYNC_PERF_STAT
+        mutex_exit_count++;
+#endif
+}
+
+/**********************************************************************
+Locks a mutex for the current thread. If the mutex is reserved, the function
+spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting for the mutex
+before suspending the thread. */
+UNIV_INLINE
+void
+mutex_enter_func(
+/*=============*/
+        mutex_t*        mutex,          /* in: pointer to mutex */
+        const char*     file_name,      /* in: file name where locked */
+        ulint           line)           /* in: line where locked */
+{
+        ut_ad(mutex_validate(mutex));
+        ut_ad(!mutex_own(mutex));
+
+        /* Note that we do not peek at the value of lock_word before trying
+        the atomic test_and_set; we could peek, and possibly save time. */
+
+#if defined UNIV_DEBUG && !defined UNIV_HOTBACKUP
+        mutex->count_using++;
+#endif /* UNIV_DEBUG && !UNIV_HOTBACKUP */
+
+        if (!mutex_test_and_set(mutex)) {
+                ut_d(mutex->thread_id = os_thread_get_curr_id());
+#ifdef UNIV_SYNC_DEBUG
+                mutex_set_debug_info(mutex, file_name, line);
+#endif
+                return; /* Succeeded! */
+        }
+
+        mutex_spin_wait(mutex, file_name, line);
+}
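
Editor's note: for readers new to this primitive, the sketch below restates the acquire/release protocol that mutex_test_and_set(), mutex_enter_func() and mutex_reset_lock_word() implement, using only the GCC atomic builtins from the HAVE_GCC_ATOMIC_BUILTINS branch. It is a minimal illustration, not InnoDB code: toy_spinlock_t and the toy_spinlock_* names are invented for the example, the spin is unbounded, and there is no wait array or waiters bookkeeping, which is exactly what mutex_spin_wait(), mutex_set_waiters() and mutex_signal_object() add in the real implementation.

    #include <sched.h>

    /* Illustrative lock word: 0 = free, 1 = taken, mirroring
    mutex_t::lock_word in the file above. */
    typedef struct {
            volatile unsigned char  lock_word;
    } toy_spinlock_t;

    /* Returns the previous value of lock_word, so 0 means the caller
    now owns the lock (the same contract as mutex_test_and_set()). */
    static unsigned char
    toy_spinlock_test_and_set(toy_spinlock_t* lock)
    {
            return __sync_lock_test_and_set(&lock->lock_word, 1);
    }

    /* Acquire: retry the test-and-set until it succeeds, yielding the
    CPU between attempts instead of suspending in a wait array. */
    static void
    toy_spinlock_enter(toy_spinlock_t* lock)
    {
            while (toy_spinlock_test_and_set(lock) != 0) {
                    sched_yield();
            }
    }

    /* Release: write 0 back with the same builtin that
    mutex_reset_lock_word() uses as its workaround for
    __sync_lock_release(). */
    static void
    toy_spinlock_exit(toy_spinlock_t* lock)
    {
            __sync_lock_test_and_set(&lock->lock_word, 0);
    }

On x86, __sync_lock_test_and_set() typically compiles to a locked XCHG, which is why this branch and the Windows inline-assembler branch of mutex_test_and_set() end up performing the same operation.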
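
Editor's note: the long comment inside mutex_exit() describes a store-then-load hazard: the write that frees lock_word must become visible before the waiters field is read, or a thread that registers as a waiter in that window is never signalled and can sleep until the periodic sync_arr_wake_threads_if_sema_free() scan rescues it. The sketch below illustrates that ordering requirement under the assumption that an explicit full barrier is used; toy_mutex_t and wake_waiters are invented names, not InnoDB APIs, and the file above relies on the barrier behaviour of its test-and-set plus the periodic scan rather than on __sync_synchronize().

    /* Illustrative stand-in for mutex_t, reduced to the two fields that
    matter for the ordering problem discussed in mutex_exit(). */
    typedef struct {
            volatile unsigned char  lock_word;      /* 1 = locked, 0 = free */
            volatile unsigned long  waiters;        /* != 0 if a thread may be waiting */
    } toy_mutex_t;

    static void
    toy_mutex_exit(toy_mutex_t* m, void (*wake_waiters)(toy_mutex_t*))
    {
            /* Free the lock word, as mutex_reset_lock_word() does. */
            __sync_lock_test_and_set(&m->lock_word, 0);

            /* Full barrier: the store above must be globally visible before
            the load below. Without this ordering, a waiter that saw the
            lock as taken and then set waiters could be missed here. */
            __sync_synchronize();

            if (m->waiters != 0) {
                    wake_waiters(m);        /* plays the role of mutex_signal_object() */
            }
    }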