Diffstat (limited to 'clang/lib/CodeGen/CGAtomic.cpp')
-rw-r--r--  clang/lib/CodeGen/CGAtomic.cpp | 63
1 file changed, 40 insertions, 23 deletions
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index c7256e240a31..ef016cf24f13 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -427,6 +427,8 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
else
switch ((llvm::AtomicOrderingCABI)FOS) {
case llvm::AtomicOrderingCABI::relaxed:
+ // 31.7.2.18: "The failure argument shall not be memory_order_release
+ // nor memory_order_acq_rel". Fall back to monotonic.
case llvm::AtomicOrderingCABI::release:
case llvm::AtomicOrderingCABI::acq_rel:
FailureOrder = llvm::AtomicOrdering::Monotonic;
@@ -439,12 +441,10 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
break;
}
- if (isStrongerThan(FailureOrder, SuccessOrder)) {
- // Don't assert on undefined behavior "failure argument shall be no
- // stronger than the success argument".
- FailureOrder =
- llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
- }
+ // Prior to C++17, "the failure argument shall be no stronger than the
+ // success argument". This condition has been lifted and the only
+ // precondition is 31.7.2.18. Effectively treat this as a DR and skip
+ // language version checks.
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
FailureOrder, Scope);
return;
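As a hedged C++ sketch (mine, not part of the patch) of what the rewritten constant-ordering path now accepts: since C++17 a failure order stronger than the success order is valid, so the removed isStrongerThan() clamp would have silently weakened the seq_cst failure order below, while a release or acq_rel failure order (still a precondition violation per 31.7.2.18) now simply falls back to monotonic instead of tripping an assert.

#include <atomic>

// Well-formed since C++17: relaxed success with a stronger seq_cst
// failure order; Clang now emits exactly these orderings.
bool cas_strong_failure(std::atomic<int> &a, int &expected, int desired) {
  return a.compare_exchange_strong(expected, desired,
                                   std::memory_order_relaxed,
                                   std::memory_order_seq_cst);
}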
@@ -454,8 +454,7 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
*SeqCstBB = nullptr;
MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
- if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
- SuccessOrder != llvm::AtomicOrdering::Release)
+ if (SuccessOrder != llvm::AtomicOrdering::Monotonic)
AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
@@ -479,8 +478,9 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
CGF.Builder.CreateBr(ContBB);
- SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
- AcquireBB);
+ if (SuccessOrder != llvm::AtomicOrdering::Release)
+ SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
+ AcquireBB);
SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
AcquireBB);
}
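A hedged sketch (function name mine) of the case this block handles: a failure order that is not a compile-time constant forces CGAtomic to emit the monotonic_fail/acquire_fail/seqcst_fail dispatch above. Note that with a release success order the consume case is no longer routed to the acquire block and instead falls through to the monotonic default.

// The failure order is a runtime value, so the emitted IR switches on
// it and branches to the per-ordering cmpxchg blocks built above.
int cas_dynamic_failure(int *p, int *expected, int desired, int failure) {
  return __atomic_compare_exchange_n(p, expected, desired, /*weak=*/0,
                                     __ATOMIC_RELEASE, failure);
}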
@@ -602,21 +602,25 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
break;

case AtomicExpr::AO__atomic_add_fetch:
- PostOp = llvm::Instruction::Add;
+ PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
+ : llvm::Instruction::Add;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
- Op = llvm::AtomicRMWInst::Add;
+ Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
+ : llvm::AtomicRMWInst::Add;
break;

case AtomicExpr::AO__atomic_sub_fetch:
- PostOp = llvm::Instruction::Sub;
+ PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
+ : llvm::Instruction::Sub;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
- Op = llvm::AtomicRMWInst::Sub;
+ Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
+ : llvm::AtomicRMWInst::Sub;
break;

case AtomicExpr::AO__atomic_min_fetch:
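A minimal sketch of what the FAdd/FSub selection enables (example mine): an atomic floating-point add through the GCC-style builtin, which can now lower to an atomicrmw fadd rather than being rejected.

// __atomic_add_fetch returns the *new* value: atomicrmw fadd yields
// the old value, and the FAdd PostOp selected above re-applies the
// operand to produce the result.
float add_one(float *p) {
  return __atomic_add_fetch(p, 1.0f, __ATOMIC_SEQ_CST);
}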
@@ -813,6 +817,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
bool UseLibcall = Misaligned | Oversized;
+ bool ShouldCastToIntPtrTy = true;
+
CharUnits MaxInlineWidth =
getContext().toCharUnitsFromBits(MaxInlineWidthInBits);

@@ -892,11 +898,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
break;
}
- LLVM_FALLTHROUGH;
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__atomic_add_fetch:
case AtomicExpr::AO__atomic_sub_fetch:
+ ShouldCastToIntPtrTy = !MemTy->isFloatingType();
+ LLVM_FALLTHROUGH;
+
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__opencl_atomic_store:
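Hedged illustration of the flag (example mine): the add/sub builtins, including the C11 forms that reach this group via the fallthrough, clear ShouldCastToIntPtrTy for floating-point operands, so the pointer below keeps its double type instead of being punned to an integer.

// MemTy->isFloatingType() is true here, so ShouldCastToIntPtrTy
// becomes false and no integer-pointer cast is emitted.
double sub_two(double *p) {
  return __atomic_sub_fetch(p, 2.0, __ATOMIC_ACQ_REL);
}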
@@ -937,15 +946,23 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
AtomicInfo Atomics(*this, AtomicVal);

- Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
- if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
- if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
- if (Dest.isValid())
- Dest = Atomics.emitCastToAtomicIntPointer(Dest);
- else if (E->isCmpXChg())
+ if (ShouldCastToIntPtrTy) {
+ Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
+ if (Val1.isValid())
+ Val1 = Atomics.convertToAtomicIntPointer(Val1);
+ if (Val2.isValid())
+ Val2 = Atomics.convertToAtomicIntPointer(Val2);
+ }
+ if (Dest.isValid()) {
+ if (ShouldCastToIntPtrTy)
+ Dest = Atomics.emitCastToAtomicIntPointer(Dest);
+ } else if (E->isCmpXChg())
Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
- else if (!RValTy->isVoidType())
- Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
+ else if (!RValTy->isVoidType()) {
+ Dest = Atomics.CreateTempAlloca();
+ if (ShouldCastToIntPtrTy)
+ Dest = Atomics.emitCastToAtomicIntPointer(Dest);
+ }

// Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
if (UseLibcall) {
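To close, a contrast sketch (mine, illustrative): exchange is not in the add/sub case group, so it still takes the integer-cast path guarded above, while a floating-point fetch_add on the same object now keeps its type end to end.

double swap_then_bump(double *p, double v) {
  double old;
  // Not in the FP case group: ShouldCastToIntPtrTy stays true and the
  // operation is still bitcast through the padded atomic integer type.
  __atomic_exchange(p, &v, &old, __ATOMIC_SEQ_CST);
  // FP path added by this patch: lowers to atomicrmw fadd on a double.
  return old + __atomic_fetch_add(p, 1.0, __ATOMIC_SEQ_CST);
}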