author    | JF Bastien <jfb@google.com> | 2016-04-17 21:28:50 +0000
committer | JF Bastien <jfb@google.com> | 2016-04-17 21:28:50 +0000
commit    | 56ce09324f4e8e9cacc03b02c6c9ba97b42331fb (patch)
tree      | cad2ddc9ed47d736e28cff2099362caac9484ef6 /lib/CodeGen/CGAtomic.cpp
parent    | b0495df9eae2824bee830cc4c94f5441f0d4cbc9 (diff)
download  | clang-56ce09324f4e8e9cacc03b02c6c9ba97b42331fb.tar.gz
Revert "NFC: unify clang / LLVM atomic ordering"
This reverts commit b0495df9eae2824bee830cc4c94f5441f0d4cbc9.
As with the corresponding LLVM revert, an assert seems to fire.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@266575 91177308-0d34-0410-b5e6-96231b3b80d8
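For context: the reverted change had replaced clang's hand-rolled ordering translation with LLVM's shared llvm::AtomicOrderingCABI utilities. This revert brings back AtomicInfo::translateAtomicOrdering, which maps LLVM's internal orderings onto the C ABI constants passed to the GNU __atomic libcalls. Below is a minimal standalone sketch of that mapping; the enum definitions are illustrative stand-ins for AtomicExpr::AO_ABI_memory_order_* (numerically identical to C11 memory_order) and llvm::AtomicOrdering, not the actual clang/LLVM declarations.

// Standalone sketch (not the clang sources) of the mapping this revert
// restores. Both enums are illustrative stand-ins.
#include <cstdio>

// Stand-in for clang's C ABI memory-order constants
// (same numbering as C11 memory_order).
enum MemoryOrderABI {
  OrderRelaxed = 0, OrderConsume = 1, OrderAcquire = 2,
  OrderRelease = 3, OrderAcqRel = 4, OrderSeqCst = 5
};

// Stand-in for llvm::AtomicOrdering.
enum class AtomicOrdering {
  NotAtomic, Unordered, Monotonic, Acquire,
  Release, AcquireRelease, SequentiallyConsistent
};

// Mirrors the restored AtomicInfo::translateAtomicOrdering: everything at
// or below Monotonic collapses to relaxed, the rest map one-to-one.
static MemoryOrderABI translateAtomicOrdering(AtomicOrdering AO) {
  switch (AO) {
  case AtomicOrdering::NotAtomic:
  case AtomicOrdering::Unordered:
  case AtomicOrdering::Monotonic:
    return OrderRelaxed;
  case AtomicOrdering::Acquire:
    return OrderAcquire;
  case AtomicOrdering::Release:
    return OrderRelease;
  case AtomicOrdering::AcquireRelease:
    return OrderAcqRel;
  case AtomicOrdering::SequentiallyConsistent:
    return OrderSeqCst;
  }
  return OrderSeqCst; // unreachable for valid input
}

int main() {
  // A __atomic_load libcall, for example, takes the ABI constant as an int.
  std::printf("Acquire maps to %d\n",
              translateAtomicOrdering(AtomicOrdering::Acquire));
}

Note how everything at or below monotonic collapses to relaxed: the libcall ABI has nothing weaker to request.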
Diffstat (limited to 'lib/CodeGen/CGAtomic.cpp')
-rw-r--r-- | lib/CodeGen/CGAtomic.cpp | 166

1 file changed, 93 insertions, 73 deletions
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index aa5a32dd52..6a1322424b 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -243,6 +243,11 @@ namespace {
     /// Materialize an atomic r-value in atomic-layout memory.
     Address materializeRValue(RValue rvalue) const;
 
+    /// \brief Translates LLVM atomic ordering to GNU atomic ordering for
+    /// libcalls.
+    static AtomicExpr::AtomicOrderingKind
+    translateAtomicOrdering(const llvm::AtomicOrdering AO);
+
     /// \brief Creates temp alloca for intermediate operations on atomic value.
     Address CreateTempAlloca() const;
   private:
@@ -287,6 +292,25 @@ namespace {
   };
 }
 
+AtomicExpr::AtomicOrderingKind
+AtomicInfo::translateAtomicOrdering(const llvm::AtomicOrdering AO) {
+  switch (AO) {
+  case llvm::AtomicOrdering::Unordered:
+  case llvm::AtomicOrdering::NotAtomic:
+  case llvm::AtomicOrdering::Monotonic:
+    return AtomicExpr::AO_ABI_memory_order_relaxed;
+  case llvm::AtomicOrdering::Acquire:
+    return AtomicExpr::AO_ABI_memory_order_acquire;
+  case llvm::AtomicOrdering::Release:
+    return AtomicExpr::AO_ABI_memory_order_release;
+  case llvm::AtomicOrdering::AcquireRelease:
+    return AtomicExpr::AO_ABI_memory_order_acq_rel;
+  case llvm::AtomicOrdering::SequentiallyConsistent:
+    return AtomicExpr::AO_ABI_memory_order_seq_cst;
+  }
+  llvm_unreachable("Unhandled AtomicOrdering");
+}
+
 Address AtomicInfo::CreateTempAlloca() const {
   Address TempAlloca = CGF.CreateMemTemp(
       (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
@@ -403,39 +427,34 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
 /// instructions to cope with the provided (but possibly only dynamically known)
 /// FailureOrder.
 static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
-                                        bool IsWeak, Address Dest, Address Ptr,
-                                        Address Val1, Address Val2,
+                                        bool IsWeak, Address Dest,
+                                        Address Ptr, Address Val1,
+                                        Address Val2,
                                         llvm::Value *FailureOrderVal,
                                         uint64_t Size,
                                         llvm::AtomicOrdering SuccessOrder) {
   llvm::AtomicOrdering FailureOrder;
   if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
-    auto FOS = FO->getSExtValue();
-    if (!llvm::isValidAtomicOrderingCABI(FOS))
+    switch (FO->getSExtValue()) {
+    default:
       FailureOrder = llvm::AtomicOrdering::Monotonic;
-    else
-      switch ((llvm::AtomicOrderingCABI)FOS) {
-      case llvm::AtomicOrderingCABI::relaxed:
-      case llvm::AtomicOrderingCABI::release:
-      case llvm::AtomicOrderingCABI::acq_rel:
-        FailureOrder = llvm::AtomicOrdering::Monotonic;
-        break;
-      case llvm::AtomicOrderingCABI::consume:
-      case llvm::AtomicOrderingCABI::acquire:
-        FailureOrder = llvm::AtomicOrdering::Acquire;
-        break;
-      case llvm::AtomicOrderingCABI::seq_cst:
-        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
-        break;
-      }
+      break;
+    case AtomicExpr::AO_ABI_memory_order_consume:
+    case AtomicExpr::AO_ABI_memory_order_acquire:
+      FailureOrder = llvm::AtomicOrdering::Acquire;
+      break;
+    case AtomicExpr::AO_ABI_memory_order_seq_cst:
+      FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
+      break;
+    }
     if (isStrongerThan(FailureOrder, SuccessOrder)) {
       // Don't assert on undefined behavior "failure argument shall be no
       // stronger than the success argument".
       FailureOrder =
-          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
+        llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
     }
-    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
-                      FailureOrder);
+    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size,
+                      SuccessOrder, FailureOrder);
     return;
   }
 
@@ -468,9 +487,9 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                       llvm::AtomicOrdering::Acquire);
     CGF.Builder.CreateBr(ContBB);
-    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
+    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                 AcquireBB);
-    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
+    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                 AcquireBB);
   }
   if (SeqCstBB) {
@@ -478,7 +497,7 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                       llvm::AtomicOrdering::SequentiallyConsistent);
     CGF.Builder.CreateBr(ContBB);
-    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
+    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                 SeqCstBB);
   }
 
@@ -1025,39 +1044,40 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
                 E->getOp() == AtomicExpr::AO__atomic_load_n;
 
   if (isa<llvm::ConstantInt>(Order)) {
-    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
-    // We should not ever get to a case where the ordering isn't a valid C ABI
-    // value, but it's hard to enforce that in general.
-    if (llvm::isValidAtomicOrderingCABI(ord))
-      switch ((llvm::AtomicOrderingCABI)ord) {
-      case llvm::AtomicOrderingCABI::relaxed:
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                     llvm::AtomicOrdering::Monotonic);
-        break;
-      case llvm::AtomicOrderingCABI::consume:
-      case llvm::AtomicOrderingCABI::acquire:
-        if (IsStore)
-          break; // Avoid crashing on code with undefined behavior
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                     llvm::AtomicOrdering::Acquire);
-        break;
-      case llvm::AtomicOrderingCABI::release:
-        if (IsLoad)
-          break; // Avoid crashing on code with undefined behavior
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                     llvm::AtomicOrdering::Release);
-        break;
-      case llvm::AtomicOrderingCABI::acq_rel:
-        if (IsLoad || IsStore)
-          break; // Avoid crashing on code with undefined behavior
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                     llvm::AtomicOrdering::AcquireRelease);
-        break;
-      case llvm::AtomicOrderingCABI::seq_cst:
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                     llvm::AtomicOrdering::SequentiallyConsistent);
-        break;
-      }
+    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+    switch (ord) {
+    case AtomicExpr::AO_ABI_memory_order_relaxed:
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+                   Size, llvm::AtomicOrdering::Monotonic);
+      break;
+    case AtomicExpr::AO_ABI_memory_order_consume:
+    case AtomicExpr::AO_ABI_memory_order_acquire:
+      if (IsStore)
+        break; // Avoid crashing on code with undefined behavior
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+                   Size, llvm::AtomicOrdering::Acquire);
+      break;
+    case AtomicExpr::AO_ABI_memory_order_release:
+      if (IsLoad)
+        break; // Avoid crashing on code with undefined behavior
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+                   Size, llvm::AtomicOrdering::Release);
+      break;
+    case AtomicExpr::AO_ABI_memory_order_acq_rel:
+      if (IsLoad || IsStore)
+        break; // Avoid crashing on code with undefined behavior
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+                   Size, llvm::AtomicOrdering::AcquireRelease);
+      break;
+    case AtomicExpr::AO_ABI_memory_order_seq_cst:
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+                   Size, llvm::AtomicOrdering::SequentiallyConsistent);
+      break;
+    default: // invalid order
+      // We should not ever get here normally, but it's hard to
+      // enforce that in general.
+      break;
+    }
 
     if (RValTy->isVoidType())
       return RValue::get(nullptr);
@@ -1099,9 +1119,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                  llvm::AtomicOrdering::Acquire);
     Builder.CreateBr(ContBB);
-    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
+    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                 AcquireBB);
-    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
+    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                 AcquireBB);
   }
   if (!IsLoad) {
@@ -1109,7 +1129,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                  llvm::AtomicOrdering::Release);
     Builder.CreateBr(ContBB);
-    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
+    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                 ReleaseBB);
   }
   if (!IsLoad && !IsStore) {
@@ -1117,14 +1137,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                  llvm::AtomicOrdering::AcquireRelease);
     Builder.CreateBr(ContBB);
-    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
+    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                 AcqRelBB);
   }
   Builder.SetInsertPoint(SeqCstBB);
   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                llvm::AtomicOrdering::SequentiallyConsistent);
   Builder.CreateBr(ContBB);
-  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
+  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
               SeqCstBB);
 
   // Cleanup and return
@@ -1244,9 +1264,9 @@ void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
            CGF.getContext().VoidPtrTy);
   Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
            CGF.getContext().VoidPtrTy);
-  Args.add(
-      RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
-      CGF.getContext().IntTy);
+  Args.add(RValue::get(
+               llvm::ConstantInt::get(CGF.IntTy, translateAtomicOrdering(AO))),
+           CGF.getContext().IntTy);
   emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
 }
 
@@ -1462,11 +1482,11 @@ AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
            CGF.getContext().VoidPtrTy);
   Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
            CGF.getContext().VoidPtrTy);
-  Args.add(RValue::get(
-               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
+  Args.add(RValue::get(llvm::ConstantInt::get(
+               CGF.IntTy, translateAtomicOrdering(Success))),
           CGF.getContext().IntTy);
-  Args.add(RValue::get(
-               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
+  Args.add(RValue::get(llvm::ConstantInt::get(
+               CGF.IntTy, translateAtomicOrdering(Failure))),
          CGF.getContext().IntTy);
   auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                               CGF.getContext().BoolTy, Args);
@@ -1773,9 +1793,9 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
              getContext().VoidPtrTy);
     args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
              getContext().VoidPtrTy);
-    args.add(
-        RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
-        getContext().IntTy);
+    args.add(RValue::get(llvm::ConstantInt::get(
+                 IntTy, AtomicInfo::translateAtomicOrdering(AO))),
+             getContext().IntTy);
     emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
     return;
   }
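Two details of the restored cmpxchg handling are worth spelling out. For a constant failure order, the switch above maps consume and acquire to acquire, seq_cst to sequentially consistent, and everything else (including release and acq_rel, which C11 forbids as failure orders) down to monotonic; if the result is still stronger than the success order, it is clamped rather than asserted on. Here is a standalone sketch of that policy, reusing the stand-in enums from the sketch above; the real code calls llvm::isStrongerThan and llvm::AtomicCmpXchgInst::getStrongestFailureOrdering instead of the hand-rolled comparisons below.

// Sketch of the constant failure-order policy in emitAtomicCmpXchgFailureSet.
static AtomicOrdering clampFailureOrdering(int FailureABI,
                                           AtomicOrdering Success) {
  AtomicOrdering Failure;
  switch (FailureABI) {
  case OrderConsume: // codegen treats consume as acquire
  case OrderAcquire:
    Failure = AtomicOrdering::Acquire;
    break;
  case OrderSeqCst:
    Failure = AtomicOrdering::SequentiallyConsistent;
    break;
  default: // relaxed, release, acq_rel, or garbage: demote to monotonic
    Failure = AtomicOrdering::Monotonic;
    break;
  }
  // "failure argument shall be no stronger than the success argument";
  // don't assert on such UB, clamp instead. The plain > stands in for
  // llvm::isStrongerThan and is valid only because Failure is already
  // restricted to monotonic, acquire, or seq_cst.
  if (Failure > Success) {
    // Stand-in for AtomicCmpXchgInst::getStrongestFailureOrdering.
    switch (Success) {
    case AtomicOrdering::Release:
      Failure = AtomicOrdering::Monotonic;
      break;
    case AtomicOrdering::AcquireRelease:
      Failure = AtomicOrdering::Acquire;
      break;
    default:
      Failure = Success;
      break;
    }
  }
  return Failure;
}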
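When the order operand is not a compile-time constant, EmitAtomicExpr instead emits one basic block per usable ordering and dispatches through an LLVM switch whose default edge is the monotonic block, so out-of-range runtime values degrade gracefully. Below is a minimal IRBuilder sketch of that control-flow shape for a load-like operation (release and acq_rel get no case, since they would be undefined behavior on a load); the function and block names are illustrative, not clang's.

// Skeleton only: each ordering block would emit the atomic op at that
// ordering before branching to the continuation block.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::LLVMContext Ctx;
  llvm::Module M("atomic-order-switch", Ctx);
  llvm::IRBuilder<> B(Ctx);

  // void @atomic_op(i32 %order), where %order carries the C ABI constant.
  auto *FT = llvm::FunctionType::get(B.getVoidTy(), {B.getInt32Ty()}, false);
  auto *F = llvm::Function::Create(FT, llvm::Function::ExternalLinkage,
                                   "atomic_op", &M);
  llvm::Value *Order = &*F->arg_begin();
  Order->setName("order");

  auto *Entry = llvm::BasicBlock::Create(Ctx, "entry", F);
  auto *MonotonicBB = llvm::BasicBlock::Create(Ctx, "monotonic", F);
  auto *AcquireBB = llvm::BasicBlock::Create(Ctx, "acquire", F);
  auto *SeqCstBB = llvm::BasicBlock::Create(Ctx, "seqcst", F);
  auto *ContBB = llvm::BasicBlock::Create(Ctx, "atomic.continue", F);

  // Invalid runtime values fall through to monotonic rather than trapping.
  B.SetInsertPoint(Entry);
  llvm::SwitchInst *SI = B.CreateSwitch(Order, MonotonicBB);
  SI->addCase(B.getInt32(1), AcquireBB); // memory_order_consume
  SI->addCase(B.getInt32(2), AcquireBB); // memory_order_acquire
  SI->addCase(B.getInt32(5), SeqCstBB);  // memory_order_seq_cst

  for (auto *BB : {MonotonicBB, AcquireBB, SeqCstBB}) {
    B.SetInsertPoint(BB);
    B.CreateBr(ContBB); // the atomic op would be emitted before this branch
  }
  B.SetInsertPoint(ContBB);
  B.CreateRetVoid();

  M.print(llvm::outs(), nullptr);
}

Printing the module shows consume and acquire sharing one acquire block, mirroring the paired addCase calls in the diff above.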