Diffstat (limited to 'storage/xtradb/include/os0sync.h')
-rw-r--r-- | storage/xtradb/include/os0sync.h | 75
1 file changed, 52 insertions, 23 deletions
diff --git a/storage/xtradb/include/os0sync.h b/storage/xtradb/include/os0sync.h
index 066fd34d668..f968de7c6dd 100644
--- a/storage/xtradb/include/os0sync.h
+++ b/storage/xtradb/include/os0sync.h
@@ -322,12 +322,29 @@ pfs_os_fast_mutex_unlock(
 #endif /* UNIV_PFS_MUTEX */
 
 /**********************************************************//**
+Acquires ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required.
+@return 0 if success, != 0 if was reserved by another thread */
+UNIV_INLINE
+ulint
+os_fast_mutex_trylock_full_barrier(
+/*==================*/
+	os_fast_mutex_t*	fast_mutex);	/*!< in: mutex to acquire */
+/**********************************************************//**
 Releases ownership of a fast mutex. */
 UNIV_INTERN
 void
 os_fast_mutex_unlock_func(
 /*======================*/
 	fast_mutex_t*		fast_mutex);	/*!< in: mutex to release */
+/**********************************************************//**
+Releases ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required. */
+UNIV_INTERN
+void
+os_fast_mutex_unlock_full_barrier(
+/*=================*/
+	os_fast_mutex_t*	fast_mutex);	/*!< in: mutex to release */
 /*********************************************************//**
 Initializes an operating system fast mutex semaphore. */
 UNIV_INTERN
@@ -432,14 +449,31 @@ amount to decrement. */
 /**********************************************************//**
 Returns the old value of *ptr, atomically sets *ptr to new_val */
 
-# define os_atomic_test_and_set_byte(ptr, new_val) \
-	__sync_lock_test_and_set(ptr, (byte) new_val)
-
 # define os_atomic_test_and_set_ulint(ptr, new_val) \
 	__sync_lock_test_and_set(ptr, new_val)
 
-# define os_atomic_lock_release_byte(ptr) \
-	__sync_lock_release(ptr)
+#ifdef __powerpc__
+/*
+  os_atomic_test_and_set_byte_release() should imply a release barrier before
+  setting, and a full barrier after. But __sync_lock_test_and_set() is only
+  documented as an acquire barrier. So on PowerPC we need to add the full
+  barrier explicitly. */
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
+	do { __sync_lock_release(ptr); \
+	     __sync_synchronize(); } while (0)
+#else
+/*
+  On x86, __sync_lock_test_and_set() happens to be a full barrier, due to
+  the LOCK prefix.
+*/
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
+	__sync_lock_test_and_set(ptr, (byte) new_val)
+#endif
+/*
+  os_atomic_test_and_set_byte_acquire() is a full memory barrier on x86. But
+  in general, just an acquire barrier should be sufficient. */
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
+	__sync_lock_test_and_set(ptr, (byte) new_val)
 
 #elif defined(HAVE_IB_SOLARIS_ATOMICS)
 
@@ -517,14 +551,14 @@ amount to decrement. */
 /**********************************************************//**
 Returns the old value of *ptr, atomically sets *ptr to new_val */
 
-# define os_atomic_test_and_set_byte(ptr, new_val) \
-	atomic_swap_uchar(ptr, new_val)
-
 # define os_atomic_test_and_set_ulint(ptr, new_val) \
 	atomic_swap_ulong(ptr, new_val)
 
-# define os_atomic_lock_release_byte(ptr) \
-	(void) atomic_swap_uchar(ptr, 0)
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
+	atomic_swap_uchar(ptr, new_val)
+
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
+	atomic_swap_uchar(ptr, new_val)
 
 #elif defined(HAVE_WINDOWS_ATOMICS)
 
@@ -644,7 +678,9 @@ Returns the old value of *ptr, atomically sets *ptr to new_val.
 InterlockedExchange() operates on LONG, and the LONG will be clobbered */
 
-# define os_atomic_test_and_set_byte(ptr, new_val) \
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
+	((byte) InterlockedExchange(ptr, new_val))
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
 	((byte) InterlockedExchange(ptr, new_val))
 
 # define os_atomic_test_and_set_ulong(ptr, new_val) \
@@ -713,11 +749,7 @@ architecture. Disable memory barrier for Intel architecture for now. */
 # define HAVE_MEMORY_BARRIER
 # define os_rmb	__atomic_thread_fence(__ATOMIC_ACQUIRE)
 # define os_wmb	__atomic_thread_fence(__ATOMIC_RELEASE)
-#ifdef __powerpc__
-# define os_isync	__asm __volatile ("isync":::"memory")
-#else
-#define os_isync do { } while(0)
-#endif
+# define os_mb	__atomic_thread_fence(__ATOMIC_SEQ_CST)
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
 	"GCC builtin __atomic_thread_fence() is used for memory barrier"
 
@@ -726,7 +758,7 @@ architecture. Disable memory barrier for Intel architecture for now. */
 # define HAVE_MEMORY_BARRIER
 # define os_rmb	__sync_synchronize()
 # define os_wmb	__sync_synchronize()
-# define os_isync	__sync_synchronize()
+# define os_mb	__sync_synchronize()
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
 	"GCC builtin __sync_synchronize() is used for memory barrier"
 
@@ -735,7 +767,7 @@ architecture. Disable memory barrier for Intel architecture for now. */
 # include <mbarrier.h>
 # define os_rmb	__machine_r_barrier()
 # define os_wmb	__machine_w_barrier()
-# define os_isync	os_rmb; os_wmb
+# define os_mb	__machine_rw_barrier()
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
 	"Solaris memory ordering functions are used for memory barrier"
 
@@ -744,17 +776,14 @@ architecture. Disable memory barrier for Intel architecture for now. */
 # include <intrin.h>
 # define os_rmb	_mm_lfence()
 # define os_wmb	_mm_sfence()
-# define os_isync	os_rmb; os_wmb
+# define os_mb	_mm_mfence()
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
 	"_mm_lfence() and _mm_sfence() are used for memory barrier"
 
-# define os_atomic_lock_release_byte(ptr) \
-	(void) InterlockedExchange(ptr, 0)
-
 #else
 # define os_rmb do { } while(0)
 # define os_wmb do { } while(0)
-# define os_isync do { } while(0)
+# define os_mb do { } while(0)
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
 	"Memory barrier is not used"
 #endif
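
The two *_full_barrier() functions added in the first hunk appear here only as prototypes; their definitions live elsewhere in the tree. Purely as an illustration of the contract stated in the comments (a trylock and an unlock that imply a full memory barrier even on PowerPC), a minimal POSIX-only sketch might look like the following. The function names and the use of a bare pthread_mutex_t are assumptions made for this example, not the patch's actual implementation.

#include <pthread.h>

/* Sketch only: trylock that is also a full memory barrier on weakly
ordered CPUs such as PowerPC. Returns 0 on success, nonzero if the
mutex was already held, matching the header comment. */
int
fast_mutex_trylock_full_barrier(pthread_mutex_t *mutex)
{
#ifdef __powerpc__
	__sync_synchronize();	/* explicit full barrier before the attempt */
#endif
	return pthread_mutex_trylock(mutex);
}

/* Sketch only: unlock preceded by a full barrier. */
void
fast_mutex_unlock_full_barrier(pthread_mutex_t *mutex)
{
#ifdef __powerpc__
	__sync_synchronize();
#endif
	pthread_mutex_unlock(mutex);
}

int
main(void)
{
	static pthread_mutex_t	m = PTHREAD_MUTEX_INITIALIZER;

	if (fast_mutex_trylock_full_barrier(&m) == 0) {
		fast_mutex_unlock_full_barrier(&m);
	}
	return 0;
}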
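The renamed byte operations split the old os_atomic_test_and_set_byte()/os_atomic_lock_release_byte() pair into explicit acquire and release flavours. A small standalone sketch of a spin latch built on the same GCC builtins shows how a caller would pair them; the latch_* names are invented for this example and are not part of the patch.

#include <sched.h>

typedef unsigned char byte;

static volatile byte	latch_word;	/* 0 = free, 1 = taken */

void
latch_enter(void)
{
	/* Acquire side: __sync_lock_test_and_set() returns the old value,
	so looping until it returns 0 gives us the latch. On x86 the
	implied LOCK prefix makes this a full barrier as well. */
	while (__sync_lock_test_and_set(&latch_word, (byte) 1) != 0) {
		sched_yield();
	}
}

void
latch_exit(void)
{
#ifdef __powerpc__
	/* Release plus an explicit full barrier, mirroring the PowerPC
	branch of os_atomic_test_and_set_byte_release() in the patch. */
	__sync_lock_release(&latch_word);
	__sync_synchronize();
#else
	/* Elsewhere a plain swap back to 0 is already strong enough. */
	__sync_lock_test_and_set(&latch_word, (byte) 0);
#endif
}

int
main(void)
{
	latch_enter();
	latch_exit();
	return 0;
}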
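The remaining hunks retire the PowerPC-only os_isync in favour of a portable full barrier, os_mb. The point of a full (StoreLoad) barrier is illustrated below with a Dekker-style store-then-load, using the same __atomic_thread_fence(__ATOMIC_SEQ_CST) that the patch maps os_mb to under GCC. The flag variables are invented for this illustration; neither os_rmb nor os_wmb would be enough here, because they do not order a store against a later load.

/* Illustrative only: Dekker-style mutual exclusion needs a StoreLoad
barrier between publishing our own flag and reading the peer's flag. */

static volatile int	my_flag;
static volatile int	peer_flag;

int
try_enter(void)
{
	my_flag = 1;

	/* os_mb in the patched header; without it the CPU may hoist the
	load of peer_flag above the store to my_flag. */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	return peer_flag == 0;
}

int
main(void)
{
	return try_enter() ? 0 : 1;
}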