author     Eli Friedman <eli.friedman@gmail.com>  2011-09-07 01:41:24 +0000
committer  Eli Friedman <eli.friedman@gmail.com>  2011-09-07 01:41:24 +0000
commit     c83b975f1fb9d11e10b5aa25029ae9bb5fa80e07 (patch)
tree       bab394c235030f2a825826ecbe699a4bd96abf87 /test/CodeGen/atomic.c
parent     268942b22464c7364e2c6868915484cd4b7596f7 (diff)
Switch clang over to using fence/atomicrmw/cmpxchg instead of the intrinsics (which will go away). LLVM CodeGen does almost exactly the same thing with these and the old intrinsics, so I'm reasonably confident this will not break anything.

There are still a few issues which need to be resolved with code generation for atomic load and store, so I'm not converting the places which need those for now.

I'm not entirely sure what to do about __builtin_llvm_memory_barrier: the fence instruction doesn't expose all the possibilities which can be expressed by __builtin_llvm_memory_barrier. I would appreciate hearing from anyone who is using this intrinsic.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@139216 91177308-0d34-0410-b5e6-96231b3b80d8
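As a minimal illustration of the lowering change this commit tests (an illustrative sketch, not part of the patch itself; the function name example_add is made up), a __sync_fetch_and_add call that previously expanded to a barrier/intrinsic/barrier sequence now lowers to a single atomicrmw instruction:

    // Hypothetical example; compile with clang -emit-llvm on a target where int is i32.
    int example_add(int *p) {
      // Old lowering (the intrinsic form removed by this patch):
      //   call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
      //   call i32 @llvm.atomic.load.add.i32.p0i32(i32* ..., i32 1)
      //   call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
      // New lowering (what the updated CHECK lines below expect):
      //   atomicrmw add i32* ..., i32 1 seq_cst
      return __sync_fetch_and_add(p, 1);
    }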
Diffstat (limited to 'test/CodeGen/atomic.c')
-rw-r--r--  test/CodeGen/atomic.c  109
1 file changed, 29 insertions, 80 deletions
diff --git a/test/CodeGen/atomic.c b/test/CodeGen/atomic.c
index 97e12ebe8c..a0adac8e1c 100644
--- a/test/CodeGen/atomic.c
+++ b/test/CodeGen/atomic.c
@@ -10,118 +10,76 @@ int atomic(void) {
int cmp = 0;
old = __sync_fetch_and_add(&val, 1);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.load.add.i32.p0i32(i32* %val, i32 1)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: atomicrmw add i32* %val, i32 1 seq_cst
old = __sync_fetch_and_sub(&valc, 2);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %valc, i8 2)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: atomicrmw sub i8* %valc, i8 2 seq_cst
old = __sync_fetch_and_min(&val, 3);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.load.min.i32.p0i32(i32* %val, i32 3)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: atomicrmw min i32* %val, i32 3 seq_cst
old = __sync_fetch_and_max(&val, 4);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.load.max.i32.p0i32(i32* %val, i32 4)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: atomicrmw max i32* %val, i32 4 seq_cst
old = __sync_fetch_and_umin(&uval, 5u);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.load.umin.i32.p0i32(i32* %uval, i32 5)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: atomicrmw umin i32* %uval, i32 5 seq_cst
old = __sync_fetch_and_umax(&uval, 6u);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.load.umax.i32.p0i32(i32* %uval, i32 6)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: atomicrmw umax i32* %uval, i32 6 seq_cst
old = __sync_lock_test_and_set(&val, 7);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.swap.i32.p0i32(i32* %val, i32 7)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: atomicrmw xchg i32* %val, i32 7 seq_cst
old = __sync_swap(&val, 8);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.swap.i32.p0i32(i32* %val, i32 8)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: atomicrmw xchg i32* %val, i32 8 seq_cst
old = __sync_val_compare_and_swap(&val, 4, 1976);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %val, i32 4, i32 1976)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: cmpxchg i32* %val, i32 4, i32 1976 seq_cst
old = __sync_bool_compare_and_swap(&val, 4, 1976);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %val, i32 4, i32 1976)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: cmpxchg i32* %val, i32 4, i32 1976 seq_cst
old = __sync_fetch_and_and(&val, 0x9);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.load.and.i32.p0i32(i32* %val, i32 9)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: atomicrmw and i32* %val, i32 9 seq_cst
old = __sync_fetch_and_or(&val, 0xa);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.load.or.i32.p0i32(i32* %val, i32 10)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: atomicrmw or i32* %val, i32 10 seq_cst
old = __sync_fetch_and_xor(&val, 0xb);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %val, i32 11)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: atomicrmw xor i32* %val, i32 11 seq_cst
old = __sync_add_and_fetch(&val, 1);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.load.add.i32.p0i32(i32* %val, i32 1)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: atomicrmw add i32* %val, i32 1 seq_cst
old = __sync_sub_and_fetch(&val, 2);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %val, i32 2)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: atomicrmw sub i32* %val, i32 2 seq_cst
old = __sync_and_and_fetch(&valc, 3);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i8 @llvm.atomic.load.and.i8.p0i8(i8* %valc, i8 3)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: atomicrmw and i8* %valc, i8 3 seq_cst
old = __sync_or_and_fetch(&valc, 4);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i8 @llvm.atomic.load.or.i8.p0i8(i8* %valc, i8 4)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: atomicrmw or i8* %valc, i8 4 seq_cst
old = __sync_xor_and_fetch(&valc, 5);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i8 @llvm.atomic.load.xor.i8.p0i8(i8* %valc, i8 5)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-
+ // CHECK: atomicrmw xor i8* %valc, i8 5 seq_cst
__sync_val_compare_and_swap((void **)0, (void *)0, (void *)0);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* null, i32 0, i32 0)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: cmpxchg i32* null, i32 0, i32 0 seq_cst
if ( __sync_val_compare_and_swap(&valb, 0, 1)) {
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* %valb, i8 0, i8 1)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: cmpxchg i8* %valb, i8 0, i8 1 seq_cst
old = 42;
}
__sync_bool_compare_and_swap((void **)0, (void *)0, (void *)0);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* null, i32 0, i32 0)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ // CHECK: cmpxchg i32* null, i32 0, i32 0 seq_cst
__sync_lock_release(&val);
+ // FIXME: WRONG!
// CHECK: store volatile i32 0, i32*
__sync_synchronize ();
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false)
+ // CHECK: fence seq_cst
return old;
}
@@ -130,6 +88,7 @@ int atomic(void) {
void release_return(int *lock) {
// Ensure this is actually returning void all the way through.
return __sync_lock_release(lock);
+ // FIXME: WRONG!
// CHECK: store volatile i32 0, i32*
}
@@ -138,21 +97,11 @@ void release_return(int *lock) {
// CHECK: @addrspace
void addrspace(int __attribute__((address_space(256))) * P) {
__sync_bool_compare_and_swap(P, 0, 1);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.cmp.swap.i32.p256i32(i32 addrspace(256)*{{.*}}, i32 0, i32 1)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-
-
+ // CHECK: cmpxchg i32 addrspace(256)*{{.*}}, i32 0, i32 1 seq_cst
+
__sync_val_compare_and_swap(P, 0, 1);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.cmp.swap.i32.p256i32(i32 addrspace(256)*{{.*}}, i32 0, i32 1)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-
-
+ // CHECK: cmpxchg i32 addrspace(256)*{{.*}}, i32 0, i32 1 seq_cst
+
__sync_xor_and_fetch(P, 123);
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- // CHECK: call i32 @llvm.atomic.load.xor.i32.p256i32(i32 addrspace(256)* {{.*}}, i32 123)
- // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-
+ // CHECK: atomicrmw xor i32 addrspace(256)*{{.*}}, i32 123 seq_cst
}
-