author     Adhemerval Zanella <azanella@linux.vnet.ibm.com>  2014-02-02 19:30:31 -0200
committer  Adhemerval Zanella <azanella@linux.vnet.ibm.com>  2014-02-02 19:30:31 -0200
commit     4f9eba24ff85c1461ad6a269d4574f8608fba402 (patch)
tree       971f0969a5f0289e540043ff17a49a2f95c63c14
parent     23abe4d2846dc91e84cc4eb8756b4a53fb09825f (diff)
issue-490: Add support for {Acquire,Release}_AtomicExchange
-rw-r--r--  src/base/atomicops-internals-arm-generic.h   24
-rw-r--r--  src/base/atomicops-internals-arm-v6plus.h    46
-rw-r--r--  src/base/atomicops-internals-linuxppc.h      40
-rw-r--r--  src/base/atomicops-internals-macosx.h        30
-rw-r--r--  src/base/atomicops-internals-windows.h       27
-rw-r--r--  src/base/atomicops-internals-x86.h           49
-rw-r--r--  src/base/atomicops.h                         37
-rw-r--r--  src/tests/atomicops_unittest.cc              78
8 files changed, 281 insertions(+), 50 deletions(-)
diff --git a/src/base/atomicops-internals-arm-generic.h b/src/base/atomicops-internals-arm-generic.h
index 4acb76a..73087aa 100644
--- a/src/base/atomicops-internals-arm-generic.h
+++ b/src/base/atomicops-internals-arm-generic.h
@@ -89,6 +89,18 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
return old_value;
}
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ // pLinuxKernelCmpxchg already has acquire and release barrier semantics.
+ return NoBarrier_AtomicExchange(ptr, new_value);
+}
+
+inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ // pLinuxKernelCmpxchg already has acquire and release barrier semantics.
+ return NoBarrier_AtomicExchange(ptr, new_value);
+}
+
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
for (;;) {
@@ -176,6 +188,18 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
return 0;
}
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ // pLinuxKernelCmpxchg already has acquire and release barrier semantics.
+ return NoBarrier_AtomicExchange(ptr, new_value);
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ // pLinuxKernelCmpxchg already has acquire and release barrier semantics.
+ return NoBarrier_AtomicExchange(ptr, new_value);
+}
+
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
NotImplementedFatalError("NoBarrier_AtomicIncrement");
diff --git a/src/base/atomicops-internals-arm-v6plus.h b/src/base/atomicops-internals-arm-v6plus.h
index 8d5b9b5..e7cecf6 100644
--- a/src/base/atomicops-internals-arm-v6plus.h
+++ b/src/base/atomicops-internals-arm-v6plus.h
@@ -94,6 +94,23 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
return old;
}
+inline void MemoryBarrier() {
+ __asm__ __volatile__("dmb" : : : "memory");
+}
+
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value = NoBarrier_AtomicExchange(ptr, new_value);
+ MemoryBarrier();
+ return old_value;
+}
+
+inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ MemoryBarrier();
+ return NoBarrier_AtomicExchange(ptr, new_value);
+}
+
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 tmp, res;
@@ -110,10 +127,6 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
return res;
}
-inline void MemoryBarrier() {
- __asm__ __volatile__("dmb" : : : "memory");
-}
-
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 tmp, res;
@@ -220,6 +233,19 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
return old;
}
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ Atomic64 old_value = NoBarrier_AtomicExchange(ptr, new_value);
+ MemoryBarrier();
+ return old_value;
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ MemoryBarrier();
+ return NoBarrier_AtomicExchange(ptr, new_value);
+}
+
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
int store_failed;
@@ -303,6 +329,18 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
return 0;
}
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ NotImplementedFatalError("Acquire_AtomicExchange");
+ return 0;
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ NotImplementedFatalError("Release_AtomicExchange");
+ return 0;
+}
+
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
NotImplementedFatalError("NoBarrier_AtomicIncrement");
diff --git a/src/base/atomicops-internals-linuxppc.h b/src/base/atomicops-internals-linuxppc.h
index ceef432..977685d 100644
--- a/src/base/atomicops-internals-linuxppc.h
+++ b/src/base/atomicops-internals-linuxppc.h
@@ -163,6 +163,26 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
return old_value;
}
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ do {
+ old_value = *ptr;
+ } while (!OSAtomicCompareAndSwap32Acquire(old_value, new_value,
+ const_cast<Atomic32*>(ptr)));
+ return old_value;
+}
+
+inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ do {
+ old_value = *ptr;
+ } while (!OSAtomicCompareAndSwap32Release(old_value, new_value,
+ const_cast<Atomic32*>(ptr)));
+ return old_value;
+}
+
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
Atomic32 increment) {
return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
@@ -294,6 +314,26 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
return old_value;
}
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
+ Atomic64 new_value) {
+ Atomic64 old_value;
+ do {
+ old_value = *ptr;
+ } while (!OSAtomicCompareAndSwap64Acquire(old_value, new_value,
+ const_cast<Atomic64*>(ptr)));
+ return old_value;
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
+ Atomic64 new_value) {
+ Atomic64 old_value;
+ do {
+ old_value = *ptr;
+ } while (!OSAtomicCompareAndSwap64Release(old_value, new_value,
+ const_cast<Atomic64*>(ptr)));
+ return old_value;
+}
+
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
Atomic64 increment) {
return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
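On Linux/PPC (and on OS X below) the new exchanges are built from the platform's compare-and-swap in a retry loop: read the current value, try to swap it for the new one with the appropriately ordered CAS, and retry if another thread changed the location in between. A standalone sketch of that idiom, not part of this patch, expressed against C++11 std::atomic for clarity:

#include <atomic>
#include <cstdint>

// Exchange built from a compare-and-swap retry loop.  On failure,
// compare_exchange_weak reloads 'expected' with the current contents of
// *ptr, so the loop retries with the freshly observed value.
int32_t ExchangeViaCas(std::atomic<int32_t>* ptr, int32_t new_value) {
  int32_t expected = ptr->load(std::memory_order_relaxed);
  while (!ptr->compare_exchange_weak(expected, new_value,
                                     std::memory_order_acquire,
                                     std::memory_order_relaxed)) {
    // retry until the CAS succeeds
  }
  return expected;  // the value that was actually replaced
}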
diff --git a/src/base/atomicops-internals-macosx.h b/src/base/atomicops-internals-macosx.h
index 430b9ee..ad84d83 100644
--- a/src/base/atomicops-internals-macosx.h
+++ b/src/base/atomicops-internals-macosx.h
@@ -132,6 +132,21 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
return old_value;
}
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ do {
+ old_value = *ptr;
+ } while (!OSAtomicCompareAndSwap32Barrier(old_value, new_value,
+ const_cast<Atomic32*>(ptr)));
+ return old_value;
+}
+
+inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
+ Atomic32 new_value) {
+ return Acquire_AtomicExchange(ptr, new_value);
+}
+
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
Atomic32 increment) {
return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
@@ -217,6 +232,21 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
return old_value;
}
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
+ Atomic64 new_value) {
+ Atomic64 old_value;
+ do {
+ old_value = *ptr;
+ } while (!OSAtomicCompareAndSwap64Barrier(old_value, new_value,
+ const_cast<Atomic64*>(ptr)));
+ return old_value;
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
+ Atomic64 new_value) {
+ return Acquire_AtomicExchange(ptr, new_value);
+}
+
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
Atomic64 increment) {
return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
diff --git a/src/base/atomicops-internals-windows.h b/src/base/atomicops-internals-windows.h
index ca16e39..ca3a16a 100644
--- a/src/base/atomicops-internals-windows.h
+++ b/src/base/atomicops-internals-windows.h
@@ -137,6 +137,18 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
return static_cast<Atomic32>(result);
}
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ // FastInterlockedExchange has both acquire and release memory barriers.
+ return NoBarrier_AtomicExchange(ptr, new_value);
+}
+
+inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ // FastInterlockedExchange has both acquire and release memory barriers.
+ return NoBarrier_AtomicExchange(ptr, new_value);
+}
+
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return FastInterlockedExchangeAdd(
@@ -188,8 +200,7 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- NoBarrier_AtomicExchange(ptr, value);
- // acts as a barrier in this implementation
+ Acquire_AtomicExchange(ptr, value);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
@@ -478,6 +489,18 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
#endif // defined(_WIN64) || defined(__MINGW64__)
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ // FastInterlockedExchange has both acquire and release memory barriers.
+ return NoBarrier_AtomicExchange(ptr, new_value);
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ // FastInterlockedExchange has both acquire and release memory barriers.
+ return NoBarrier_AtomicExchange(ptr, new_value);
+}
+
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
diff --git a/src/base/atomicops-internals-x86.h b/src/base/atomicops-internals-x86.h
index c34aa5c..7c67325 100644
--- a/src/base/atomicops-internals-x86.h
+++ b/src/base/atomicops-internals-x86.h
@@ -89,6 +89,21 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
return new_value; // Now it's the previous value.
}
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 old_val = NoBarrier_AtomicExchange(ptr, new_value);
+ if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+ __asm__ __volatile__("lfence" : : : "memory");
+ }
+ return old_val;
+}
+
+inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ // xchgl already has release memory barrier semantics.
+ return NoBarrier_AtomicExchange(ptr, new_value);
+}
+
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp = increment;
@@ -152,7 +167,7 @@ inline void MemoryBarrier() {
__asm__ __volatile__("mfence" : : : "memory");
} else { // mfence is faster but not present on PIII
Atomic32 x = 0;
- NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII
+ Acquire_AtomicExchange(&x, 0);
}
}
@@ -161,8 +176,7 @@ inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
__asm__ __volatile__("mfence" : : : "memory");
} else {
- NoBarrier_AtomicExchange(ptr, value);
- // acts as a barrier on PIII
+ Acquire_AtomicExchange(ptr, value);
}
}
#endif
@@ -213,6 +227,21 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
return new_value; // Now it's the previous value.
}
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ Atomic64 old_val = NoBarrier_AtomicExchange(ptr, new_value);
+ if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+ __asm__ __volatile__("lfence" : : : "memory");
+ }
+ return old_val;
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ // xchgq already has release memory barrier semantics.
+ return NoBarrier_AtomicExchange(ptr, new_value);
+}
+
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 temp = increment;
@@ -334,6 +363,20 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
return old_val;
}
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_val) {
+ Atomic64 old_val = NoBarrier_AtomicExchange(ptr, new_val);
+ if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+ __asm__ __volatile__("lfence" : : : "memory");
+ }
+ return old_val;
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_val) {
+ return NoBarrier_AtomicExchange(ptr, new_val);
+}
+
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 old_val, new_val;
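The x86 variants above lean on the fact that a locked xchg already orders memory on both sides, so Release_AtomicExchange is just the plain exchange, and Acquire_AtomicExchange only adds an lfence on processors flagged with has_amd_lock_mb_bug. For comparison only, not part of this patch, the same two orderings requested through the GCC/Clang __atomic builtins would look like:

#include <stdint.h>

// Acquire exchange: later loads/stores may not be reordered before it.
static inline int32_t builtin_acquire_exchange(int32_t* ptr, int32_t new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_ACQUIRE);
}

// Release exchange: earlier loads/stores may not be reordered after it.
static inline int32_t builtin_release_exchange(int32_t* ptr, int32_t new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELEASE);
}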
diff --git a/src/base/atomicops.h b/src/base/atomicops.h
index 17e8a27..7f01b4a 100644
--- a/src/base/atomicops.h
+++ b/src/base/atomicops.h
@@ -50,6 +50,16 @@
// implementations on other architectures will cause your code to break. If you
// do not know what you are doing, avoid these routines, and use a Mutex.
//
+// These following lower-level operations are typically useful only to people
+// implementing higher-level synchronization operations like spinlocks,
+// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
+// a store with appropriate memory-ordering instructions. "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation. "Barrier" operations have both "Acquire" and "Release"
+// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The NoBarrier
// versions are provided when no barriers are needed:
@@ -149,6 +159,18 @@ inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}
+inline AtomicWord Acquire_AtomicExchange(volatile AtomicWord* ptr,
+ AtomicWord new_value) {
+ return Acquire_AtomicExchange(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
+}
+
+inline AtomicWord Release_AtomicExchange(volatile AtomicWord* ptr,
+ AtomicWord new_value) {
+ return Release_AtomicExchange(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
+}
+
// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory
// barriers.
@@ -164,17 +186,6 @@ inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}
-// ------------------------------------------------------------------------
-// These following lower-level operations are typically useful only to people
-// implementing higher-level synchronization operations like spinlocks,
-// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
-// a store with appropriate memory-ordering instructions. "Acquire" operations
-// ensure that no later memory access can be reordered ahead of the operation.
-// "Release" operations ensure that no previous memory access can be reordered
-// after the operation. "Barrier" operations have both "Acquire" and "Release"
-// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
-// ------------------------------------------------------------------------
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
@@ -250,6 +261,8 @@ Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
+Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
+Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment);
@@ -271,6 +284,8 @@ Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
+Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
+Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
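For callers, the new primitives are the natural building block for lock-like constructs described in the header comment above. A minimal sketch, not part of this patch, of a test-and-set spinlock built on the declarations just added (assuming atomicops.h is on the include path and Atomic32 is visible at global scope, as it is in the unit test):

#include "base/atomicops.h"

class SpinLockSketch {
 public:
  SpinLockSketch() : state_(0) {}

  void Lock() {
    // Acquire semantics: accesses inside the critical section cannot be
    // reordered ahead of the exchange that takes the lock.
    while (base::subtle::Acquire_AtomicExchange(&state_, 1) != 0) {
      // spin until the previous holder stores 0
    }
  }

  void Unlock() {
    // Release semantics: accesses inside the critical section cannot be
    // reordered after the exchange that drops the lock.
    base::subtle::Release_AtomicExchange(&state_, 0);
  }

 private:
  volatile Atomic32 state_;  // 0 = unlocked, 1 = locked
};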
diff --git a/src/tests/atomicops_unittest.cc b/src/tests/atomicops_unittest.cc
index 3892b59..9c5391e 100644
--- a/src/tests/atomicops_unittest.cc
+++ b/src/tests/atomicops_unittest.cc
@@ -38,13 +38,14 @@
#define GG_ULONGLONG(x) static_cast<uint64>(x)
template <class AtomicType>
-static void TestAtomicIncrement() {
+static void TestAtomicIncrement(AtomicType (*atomic_increment_func)
+ (volatile AtomicType*, AtomicType)) {
// For now, we just test single threaded execution
- // use a guard value to make sure the NoBarrier_AtomicIncrement doesn't go
+ // use a guard value to make sure the atomic_increment_func doesn't go
// outside the expected address bounds. This is in particular to
// test that some future change to the asm code doesn't cause the
- // 32-bit NoBarrier_AtomicIncrement doesn't do the wrong thing on 64-bit
+ // 32-bit atomic_increment_func to do the wrong thing on 64-bit
// machines.
struct {
AtomicType prev_word;
@@ -60,47 +61,47 @@ static void TestAtomicIncrement() {
s.count = 0;
s.next_word = next_word_value;
- ASSERT_EQ(1, base::subtle::NoBarrier_AtomicIncrement(&s.count, 1));
+ ASSERT_EQ(1, (*atomic_increment_func)(&s.count, 1));
ASSERT_EQ(1, s.count);
ASSERT_EQ(prev_word_value, s.prev_word);
ASSERT_EQ(next_word_value, s.next_word);
- ASSERT_EQ(3, base::subtle::NoBarrier_AtomicIncrement(&s.count, 2));
+ ASSERT_EQ(3, (*atomic_increment_func)(&s.count, 2));
ASSERT_EQ(3, s.count);
ASSERT_EQ(prev_word_value, s.prev_word);
ASSERT_EQ(next_word_value, s.next_word);
- ASSERT_EQ(6, base::subtle::NoBarrier_AtomicIncrement(&s.count, 3));
+ ASSERT_EQ(6, (*atomic_increment_func)(&s.count, 3));
ASSERT_EQ(6, s.count);
ASSERT_EQ(prev_word_value, s.prev_word);
ASSERT_EQ(next_word_value, s.next_word);
- ASSERT_EQ(3, base::subtle::NoBarrier_AtomicIncrement(&s.count, -3));
+ ASSERT_EQ(3, (*atomic_increment_func)(&s.count, -3));
ASSERT_EQ(3, s.count);
ASSERT_EQ(prev_word_value, s.prev_word);
ASSERT_EQ(next_word_value, s.next_word);
- ASSERT_EQ(1, base::subtle::NoBarrier_AtomicIncrement(&s.count, -2));
+ ASSERT_EQ(1, (*atomic_increment_func)(&s.count, -2));
ASSERT_EQ(1, s.count);
ASSERT_EQ(prev_word_value, s.prev_word);
ASSERT_EQ(next_word_value, s.next_word);
- ASSERT_EQ(0, base::subtle::NoBarrier_AtomicIncrement(&s.count, -1));
+ ASSERT_EQ(0, (*atomic_increment_func)(&s.count, -1));
ASSERT_EQ(0, s.count);
ASSERT_EQ(prev_word_value, s.prev_word);
ASSERT_EQ(next_word_value, s.next_word);
- ASSERT_EQ(-1, base::subtle::NoBarrier_AtomicIncrement(&s.count, -1));
+ ASSERT_EQ(-1, (*atomic_increment_func)(&s.count, -1));
ASSERT_EQ(-1, s.count);
ASSERT_EQ(prev_word_value, s.prev_word);
ASSERT_EQ(next_word_value, s.next_word);
- ASSERT_EQ(-5, base::subtle::NoBarrier_AtomicIncrement(&s.count, -4));
+ ASSERT_EQ(-5, (*atomic_increment_func)(&s.count, -4));
ASSERT_EQ(-5, s.count);
ASSERT_EQ(prev_word_value, s.prev_word);
ASSERT_EQ(next_word_value, s.next_word);
- ASSERT_EQ(0, base::subtle::NoBarrier_AtomicIncrement(&s.count, 5));
+ ASSERT_EQ(0, (*atomic_increment_func)(&s.count, 5));
ASSERT_EQ(0, s.count);
ASSERT_EQ(prev_word_value, s.prev_word);
ASSERT_EQ(next_word_value, s.next_word);
@@ -111,9 +112,10 @@ static void TestAtomicIncrement() {
template <class AtomicType>
-static void TestCompareAndSwap() {
+static void TestCompareAndSwap(AtomicType (*compare_and_swap_func)
+ (volatile AtomicType*, AtomicType, AtomicType)) {
AtomicType value = 0;
- AtomicType prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 1);
+ AtomicType prev = (*compare_and_swap_func)(&value, 0, 1);
ASSERT_EQ(1, value);
ASSERT_EQ(0, prev);
@@ -122,21 +124,22 @@ static void TestCompareAndSwap() {
const AtomicType k_test_val = (GG_ULONGLONG(1) <<
(NUM_BITS(AtomicType) - 2)) + 11;
value = k_test_val;
- prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 5);
+ prev = (*compare_and_swap_func)(&value, 0, 5);
ASSERT_EQ(k_test_val, value);
ASSERT_EQ(k_test_val, prev);
value = k_test_val;
- prev = base::subtle::NoBarrier_CompareAndSwap(&value, k_test_val, 5);
+ prev = (*compare_and_swap_func)(&value, k_test_val, 5);
ASSERT_EQ(5, value);
ASSERT_EQ(k_test_val, prev);
}
template <class AtomicType>
-static void TestAtomicExchange() {
+static void TestAtomicExchange(AtomicType (*atomic_exchange_func)
+ (volatile AtomicType*, AtomicType)) {
AtomicType value = 0;
- AtomicType new_value = base::subtle::NoBarrier_AtomicExchange(&value, 1);
+ AtomicType new_value = (*atomic_exchange_func)(&value, 1);
ASSERT_EQ(1, value);
ASSERT_EQ(0, new_value);
@@ -145,28 +148,29 @@ static void TestAtomicExchange() {
const AtomicType k_test_val = (GG_ULONGLONG(1) <<
(NUM_BITS(AtomicType) - 2)) + 11;
value = k_test_val;
- new_value = base::subtle::NoBarrier_AtomicExchange(&value, k_test_val);
+ new_value = (*atomic_exchange_func)(&value, k_test_val);
ASSERT_EQ(k_test_val, value);
ASSERT_EQ(k_test_val, new_value);
value = k_test_val;
- new_value = base::subtle::NoBarrier_AtomicExchange(&value, 5);
+ new_value = (*atomic_exchange_func)(&value, 5);
ASSERT_EQ(5, value);
ASSERT_EQ(k_test_val, new_value);
}
template <class AtomicType>
-static void TestAtomicIncrementBounds() {
+static void TestAtomicIncrementBounds(AtomicType (*atomic_increment_func)
+ (volatile AtomicType*, AtomicType)) {
// Test increment at the half-width boundary of the atomic type.
// It is primarily for testing at the 32-bit boundary for 64-bit atomic type.
AtomicType test_val = GG_ULONGLONG(1) << (NUM_BITS(AtomicType) / 2);
AtomicType value = test_val - 1;
- AtomicType new_value = base::subtle::NoBarrier_AtomicIncrement(&value, 1);
+ AtomicType new_value = (*atomic_increment_func)(&value, 1);
ASSERT_EQ(test_val, value);
ASSERT_EQ(value, new_value);
- base::subtle::NoBarrier_AtomicIncrement(&value, -1);
+ (*atomic_increment_func)(&value, -1);
ASSERT_EQ(test_val - 1, value);
}
@@ -222,16 +226,28 @@ static void TestLoad() {
template <class AtomicType>
static void TestAtomicOps() {
- TestCompareAndSwap<AtomicType>();
- TestAtomicExchange<AtomicType>();
- TestAtomicIncrementBounds<AtomicType>();
+ TestCompareAndSwap<AtomicType>(base::subtle::NoBarrier_CompareAndSwap);
+ TestCompareAndSwap<AtomicType>(base::subtle::Acquire_CompareAndSwap);
+ TestCompareAndSwap<AtomicType>(base::subtle::Release_CompareAndSwap);
+
+ TestAtomicExchange<AtomicType>(base::subtle::NoBarrier_AtomicExchange);
+ TestAtomicExchange<AtomicType>(base::subtle::Acquire_AtomicExchange);
+ TestAtomicExchange<AtomicType>(base::subtle::Release_AtomicExchange);
+
+ TestAtomicIncrementBounds<AtomicType>(
+ base::subtle::NoBarrier_AtomicIncrement);
+ TestAtomicIncrementBounds<AtomicType>(
+ base::subtle::Barrier_AtomicIncrement);
+
TestStore<AtomicType>();
TestLoad<AtomicType>();
}
int main(int argc, char** argv) {
- TestAtomicIncrement<AtomicWord>();
- TestAtomicIncrement<Atomic32>();
+ TestAtomicIncrement<AtomicWord>(base::subtle::NoBarrier_AtomicIncrement);
+ TestAtomicIncrement<AtomicWord>(base::subtle::Barrier_AtomicIncrement);
+ TestAtomicIncrement<Atomic32>(base::subtle::NoBarrier_AtomicIncrement);
+ TestAtomicIncrement<Atomic32>(base::subtle::Barrier_AtomicIncrement);
TestAtomicOps<AtomicWord>();
TestAtomicOps<Atomic32>();
@@ -248,8 +264,10 @@ int main(int argc, char** argv) {
// If we ever *do* want to enable this, try adding -msse (or -mmmx?)
// to the CXXFLAGS in Makefile.am.
#if 0 and defined(BASE_HAS_ATOMIC64)
- TestAtomicIncrement<base::subtle::Atomic64>();
- TestAtomicOps<base::subtle::Atomic64>();
+ TestAtomicIncrement<base::subtle::Atomic64>(
+ base::subtle::NoBarrier_AtomicIncrement);
+ TestAtomicIncrement<base::subtle::Atomic64>(
+ base::subtle::Barrier_AtomicIncrement);
#endif
printf("PASS\n");