-rw-r--r--  lib/CodeGen/CGAtomic.cpp       24
-rw-r--r--  lib/CodeGen/CGExpr.cpp         15
-rw-r--r--  lib/CodeGen/CodeGenFunction.h   1
-rw-r--r--  test/CodeGen/ms-volatile.c     15
4 files changed, 23 insertions, 32 deletions
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index aa5a32dd52..7b747c1383 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -1274,31 +1274,11 @@ bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
// An atomic is inline if we don't need to use a libcall.
bool AtomicIsInline = !AI.shouldUseLibcall();
- return IsVolatile && AtomicIsInline;
-}
-
-/// An type is a candidate for having its loads and stores be made atomic if
-/// we are operating under /volatile:ms *and* we know the access is volatile and
-/// performing such an operation can be performed without a libcall.
-bool CodeGenFunction::typeIsSuitableForInlineAtomic(QualType Ty,
- bool IsVolatile) const {
- // The operation must be volatile for us to make it atomic.
- if (!IsVolatile)
- return false;
- // The -fms-volatile flag must be passed for us to adopt this behavior.
- if (!CGM.getCodeGenOpts().MSVolatile)
- return false;
-
- // An atomic is inline if we don't need to use a libcall (e.g. it is builtin).
- if (!getContext().getTargetInfo().hasBuiltinAtomic(
- getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty)))
- return false;
-
// MSVC doesn't seem to do this for types wider than a pointer.
- if (getContext().getTypeSize(Ty) >
+ if (getContext().getTypeSize(LV.getType()) >
getContext().getTypeSize(getContext().getIntPtrType()))
return false;
- return true;
+ return IsVolatile && AtomicIsInline;
}
RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
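
Note on the resulting code: after this hunk, the merged predicate reads roughly as
follows. This is reconstructed from the context and added lines above; the lines above
the hunk are not shown in the diff and are only summarized by the first comment, so
treat that elided part as an assumption.

    bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
      // ... code above the hunk, not shown in the diff: sets up the AtomicInfo AI
      // queried below and, presumably, the /volatile:ms (-fms-volatile) guard ...
      bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
      // An atomic is inline if we don't need to use a libcall.
      bool AtomicIsInline = !AI.shouldUseLibcall();
      // MSVC doesn't seem to do this for types wider than a pointer.
      if (getContext().getTypeSize(LV.getType()) >
          getContext().getTypeSize(getContext().getIntPtrType()))
        return false;
      return IsVolatile && AtomicIsInline;
    }

The net effect is that the pointer-size restriction, previously checked only in
typeIsSuitableForInlineAtomic, now applies to every caller of
LValueIsSuitableForInlineAtomic.
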
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 1001a00b62..98a6b173f8 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -1273,10 +1273,10 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
}
// Atomic operations have to be done on integral types.
- if (Ty->isAtomicType() || typeIsSuitableForInlineAtomic(Ty, Volatile)) {
- LValue lvalue =
+ LValue AtomicLValue =
LValue::MakeAddr(Addr, Ty, getContext(), AlignSource, TBAAInfo);
- return EmitAtomicLoad(lvalue, Loc).getScalarVal();
+ if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
+ return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
}
llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
@@ -1384,12 +1384,11 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
Value = EmitToMemory(Value, Ty);
+ LValue AtomicLValue =
+ LValue::MakeAddr(Addr, Ty, getContext(), AlignSource, TBAAInfo);
if (Ty->isAtomicType() ||
- (!isInit && typeIsSuitableForInlineAtomic(Ty, Volatile))) {
- EmitAtomicStore(RValue::get(Value),
- LValue::MakeAddr(Addr, Ty, getContext(),
- AlignSource, TBAAInfo),
- isInit);
+ (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
+ EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
return;
}
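
With the helper removed, both scalar paths in CGExpr.cpp now build the LValue
unconditionally and hand it to the merged predicate. For reference, the load path after
the first hunk above reads roughly as follows (a reconstruction from that hunk; the
rest of EmitLoadOfScalar is elided). The store path in the second hunk follows the same
pattern with EmitAtomicStore:

      // Atomic operations have to be done on integral types.
      LValue AtomicLValue =
          LValue::MakeAddr(Addr, Ty, getContext(), AlignSource, TBAAInfo);
      if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
        return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
      }

      llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
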
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 0100ac3ecd..9f0fdd3f1e 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -2496,7 +2496,6 @@ public:
void EmitAtomicInit(Expr *E, LValue lvalue);
bool LValueIsSuitableForInlineAtomic(LValue Src);
- bool typeIsSuitableForInlineAtomic(QualType Ty, bool IsVolatile) const;
RValue EmitAtomicLoad(LValue LV, SourceLocation SL,
AggValueSlot Slot = AggValueSlot::ignored());
diff --git a/test/CodeGen/ms-volatile.c b/test/CodeGen/ms-volatile.c
index 242ce067d6..a3ef35a3fa 100644
--- a/test/CodeGen/ms-volatile.c
+++ b/test/CodeGen/ms-volatile.c
@@ -7,6 +7,13 @@ struct bar {
};
typedef _Complex float __declspec(align(8)) baz;
+#pragma pack(push)
+#pragma pack(1)
+struct qux {
+ volatile int f;
+};
+#pragma pack(pop)
+
void test1(struct foo *p, struct foo *q) {
*p = *q;
// CHECK-LABEL: @test1
@@ -58,7 +65,8 @@ void test8(volatile double *p, volatile double *q) {
void test9(volatile baz *p, baz *q) {
*p = *q;
// CHECK-LABEL: @test9
- // CHECK: store atomic volatile {{.*}}, {{.*}} release
+ // CHECK: store volatile {{.*}}, {{.*}}
+ // CHECK: store volatile {{.*}}, {{.*}}
}
void test10(volatile long long *p, volatile long long *q) {
*p = *q;
@@ -72,3 +80,8 @@ void test11(volatile float *p, volatile float *q) {
// CHECK: load atomic volatile {{.*}} acquire
// CHECK: store atomic volatile {{.*}}, {{.*}} release
}
+int test12(struct qux *p) {
+ return p->f;
+ // CHECK-LABEL: @test12
+ // CHECK: load volatile {{.*}}
+}
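
The new test12 exercises the restriction added in CGAtomic.cpp: under /volatile:ms, a
naturally aligned volatile access of at most pointer width is emitted as an inline
atomic with acquire/release ordering, while the packed field of struct qux (potentially
under-aligned, so it would need a libcall) and the 8-byte _Complex float in test9
(wider than a pointer on the, presumably 32-bit, test target) stay plain volatile loads
and stores. A minimal standalone reproduction might look like the sketch below; the
triple and flags are illustrative assumptions, not the test's actual RUN line:

    // ms-volatile-demo.c (also compiles as C++); inspect the IR with something like:
    //   clang -cc1 -triple i686-pc-win32 -fms-volatile -emit-llvm -o - ms-volatile-demo.c
    // (hypothetical invocation; the real test's triple and flags may differ)
    #pragma pack(push, 1)
    struct qux {
      volatile int f;      // packed, so possibly under-aligned
    };
    #pragma pack(pop)

    int aligned_load(volatile int *p) {
      return *p;           // expected: load atomic volatile ... acquire
    }

    int packed_load(struct qux *p) {
      return p->f;         // expected: plain load volatile (mirrors test12)
    }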