author	Tim Northover <tnorthover@apple.com>	2014-03-11 10:48:52 +0000
committer	Tim Northover <tnorthover@apple.com>	2014-03-11 10:48:52 +0000
commit	ca396e391e13d417605ebed06780d92c88f14a6b (patch)
tree	a984bbd50ef52c0ad3f0fd66b81d1021b072d16c /lib
parent	fb411c8b8ce42689d42b0d8e3a871d81f86a7298 (diff)
IR: add a second ordering operand to cmpxchg for failure

The syntax for "cmpxchg" should now look something like:

    cmpxchg i32* %addr, i32 42, i32 3 acquire monotonic

where the second ordering argument gives the required semantics in the
case that no exchange takes place. It should be no stronger than the
first ordering constraint and cannot be either "release" or "acq_rel"
(since no store will have taken place).

rdar://problem/15996804

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@203559 91177308-0d34-0410-b5e6-96231b3b80d8
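A minimal sketch of driving this through the C++ API, assuming only the two-ordering AtomicCmpXchgInst constructor added by this patch (the helper name and arguments are illustrative, not part of the commit):

    // Builds a cmpxchg like the example above: success ordering first,
    // then the (no stronger) failure ordering.
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    AtomicCmpXchgInst *emitCmpXchg(Value *Addr, Value *Expected,
                                   Value *Desired, BasicBlock *BB) {
      return new AtomicCmpXchgInst(Addr, Expected, Desired,
                                   Acquire,   // ordering if the exchange happens
                                   Monotonic, // ordering if the compare fails
                                   CrossThread, BB);
    }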
Diffstat (limited to 'lib')
-rw-r--r--  lib/Analysis/AliasAnalysis.cpp                        2
-rw-r--r--  lib/AsmParser/LLParser.cpp                            27
-rw-r--r--  lib/AsmParser/LLParser.h                              1
-rw-r--r--  lib/Bitcode/Reader/BitcodeReader.cpp                  20
-rw-r--r--  lib/Bitcode/Writer/BitcodeWriter.cpp                  4
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeDAG.cpp              1
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp     9
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAG.cpp             28
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp      10
-rw-r--r--  lib/IR/AsmWriter.cpp                                  34
-rw-r--r--  lib/IR/AsmWriter.h                                    3
-rw-r--r--  lib/IR/Instruction.cpp                                10
-rw-r--r--  lib/IR/Instructions.cpp                               27
-rw-r--r--  lib/IR/Verifier.cpp                                   17
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp                    8
-rw-r--r--  lib/Target/CppBackend/CPPBackend.cpp                  8
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp                    1
-rw-r--r--  lib/Transforms/IPO/MergeFunctions.cpp                 5
-rw-r--r--  lib/Transforms/Instrumentation/MemorySanitizer.cpp    2
-rw-r--r--  lib/Transforms/Instrumentation/ThreadSanitizer.cpp    19
20 files changed, 170 insertions, 66 deletions
diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp
index 36ed40d354..9583bbe5e3 100644
--- a/lib/Analysis/AliasAnalysis.cpp
+++ b/lib/Analysis/AliasAnalysis.cpp
@@ -338,7 +338,7 @@ AliasAnalysis::getModRefInfo(const VAArgInst *V, const Location &Loc) {
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(const AtomicCmpXchgInst *CX, const Location &Loc) {
// Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
- if (CX->getOrdering() > Monotonic)
+ if (CX->getSuccessOrdering() > Monotonic)
return ModRef;
// If the cmpxchg address does not alias the location, it does not access it.
diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp
index f29ceddf64..f75d3c944a 100644
--- a/lib/AsmParser/LLParser.cpp
+++ b/lib/AsmParser/LLParser.cpp
@@ -1518,6 +1518,15 @@ bool LLParser::ParseScopeAndOrdering(bool isAtomic, SynchronizationScope &Scope,
Scope = CrossThread;
if (EatIfPresent(lltok::kw_singlethread))
Scope = SingleThread;
+
+ return ParseOrdering(Ordering);
+}
+
+/// ParseOrdering
+/// ::= AtomicOrdering
+///
+/// This sets Ordering to the parsed value.
+bool LLParser::ParseOrdering(AtomicOrdering &Ordering) {
switch (Lex.getKind()) {
default: return TokError("Expected ordering on atomic instruction");
case lltok::kw_unordered: Ordering = Unordered; break;
@@ -4193,11 +4202,12 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
/// ParseCmpXchg
/// ::= 'cmpxchg' 'volatile'? TypeAndValue ',' TypeAndValue ',' TypeAndValue
-/// 'singlethread'? AtomicOrdering
+/// 'singlethread'? AtomicOrdering AtomicOrdering
int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
Value *Ptr, *Cmp, *New; LocTy PtrLoc, CmpLoc, NewLoc;
bool AteExtraComma = false;
- AtomicOrdering Ordering = NotAtomic;
+ AtomicOrdering SuccessOrdering = NotAtomic;
+ AtomicOrdering FailureOrdering = NotAtomic;
SynchronizationScope Scope = CrossThread;
bool isVolatile = false;
@@ -4209,11 +4219,16 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
ParseTypeAndValue(Cmp, CmpLoc, PFS) ||
ParseToken(lltok::comma, "expected ',' after cmpxchg cmp operand") ||
ParseTypeAndValue(New, NewLoc, PFS) ||
- ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
+ ParseScopeAndOrdering(true /*Always atomic*/, Scope, SuccessOrdering) ||
+ ParseOrdering(FailureOrdering))
return true;
- if (Ordering == Unordered)
+ if (SuccessOrdering == Unordered || FailureOrdering == Unordered)
return TokError("cmpxchg cannot be unordered");
+ if (SuccessOrdering < FailureOrdering)
+ return TokError("cmpxchg must be at least as ordered on success as failure");
+ if (FailureOrdering == Release || FailureOrdering == AcquireRelease)
+ return TokError("cmpxchg failure ordering cannot include release semantics");
if (!Ptr->getType()->isPointerTy())
return Error(PtrLoc, "cmpxchg operand must be a pointer");
if (cast<PointerType>(Ptr->getType())->getElementType() != Cmp->getType())
@@ -4227,8 +4242,8 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
return Error(NewLoc, "cmpxchg operand must be power-of-two byte-sized"
" integer");
- AtomicCmpXchgInst *CXI =
- new AtomicCmpXchgInst(Ptr, Cmp, New, Ordering, Scope);
+ AtomicCmpXchgInst *CXI = new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering,
+ FailureOrdering, Scope);
CXI->setVolatile(isVolatile);
Inst = CXI;
return AteExtraComma ? InstExtraComma : InstNormal;
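A note on the SuccessOrdering < FailureOrdering check above: it relies on the AtomicOrdering enumerators being laid out in roughly increasing strength. For reference, the layout as found in the contemporary llvm/IR/Instructions.h (quoted from memory; treat the exact values as an assumption):

    enum AtomicOrdering {
      NotAtomic = 0,
      Unordered = 1,
      Monotonic = 2,
      // Consume = 3,  // Not specified yet.
      Acquire = 4,
      Release = 5,
      AcquireRelease = 6,
      SequentiallyConsistent = 7
    };

So "cmpxchg ... acquire monotonic" is accepted (4 >= 2), while "cmpxchg ... monotonic acquire" trips the "at least as ordered on success" error (2 < 4).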
diff --git a/lib/AsmParser/LLParser.h b/lib/AsmParser/LLParser.h
index 790ffd2c71..294a1e1436 100644
--- a/lib/AsmParser/LLParser.h
+++ b/lib/AsmParser/LLParser.h
@@ -209,6 +209,7 @@ namespace llvm {
bool ParseOptionalAlignment(unsigned &Alignment);
bool ParseScopeAndOrdering(bool isAtomic, SynchronizationScope &Scope,
AtomicOrdering &Ordering);
+ bool ParseOrdering(AtomicOrdering &Ordering);
bool ParseOptionalStackAlignment(unsigned &Alignment);
bool ParseOptionalCommaAlign(unsigned &Alignment, bool &AteExtraComma);
bool ParseOptionalCommaInAlloca(bool &IsInAlloca);
diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp
index 5e358d9cc5..eb716660eb 100644
--- a/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -2882,7 +2882,8 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
break;
}
case bitc::FUNC_CODE_INST_CMPXCHG: {
- // CMPXCHG:[ptrty, ptr, cmp, new, vol, ordering, synchscope]
+ // CMPXCHG:[ptrty, ptr, cmp, new, vol, successordering, synchscope,
+ // failureordering]
unsigned OpNum = 0;
Value *Ptr, *Cmp, *New;
if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
@@ -2890,13 +2891,22 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
cast<PointerType>(Ptr->getType())->getElementType(), Cmp) ||
popValue(Record, OpNum, NextValueNo,
cast<PointerType>(Ptr->getType())->getElementType(), New) ||
- OpNum+3 != Record.size())
+ (OpNum + 3 != Record.size() && OpNum + 4 != Record.size()))
return Error(InvalidRecord);
- AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+1]);
- if (Ordering == NotAtomic || Ordering == Unordered)
+ AtomicOrdering SuccessOrdering = GetDecodedOrdering(Record[OpNum+1]);
+ if (SuccessOrdering == NotAtomic || SuccessOrdering == Unordered)
return Error(InvalidRecord);
SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+2]);
- I = new AtomicCmpXchgInst(Ptr, Cmp, New, Ordering, SynchScope);
+
+ AtomicOrdering FailureOrdering;
+ if (Record.size() < 7)
+ FailureOrdering =
+ AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrdering);
+ else
+ FailureOrdering = GetDecodedOrdering(Record[OpNum+3]);
+
+ I = new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering, FailureOrdering,
+ SynchScope);
cast<AtomicCmpXchgInst>(I)->setVolatile(Record[OpNum]);
InstructionList.push_back(I);
break;
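When the extra field is absent (old bitcode), the reader derives a failure ordering via AtomicCmpXchgInst::getStrongestFailureOrdering. Its definition is not in this diff, since the header change lives outside lib/; a hypothetical reconstruction under the stated constraints (no stronger than success, never a release form) would look like:

    #include "llvm/IR/Instructions.h"        // AtomicOrdering
    #include "llvm/Support/ErrorHandling.h"  // llvm_unreachable
    using namespace llvm;

    // Sketch only; the real version is a static member of AtomicCmpXchgInst.
    static AtomicOrdering strongestFailureOrdering(AtomicOrdering SuccessOrdering) {
      switch (SuccessOrdering) {
      default: llvm_unreachable("invalid cmpxchg success ordering");
      case Release:
      case Monotonic:              return Monotonic;  // failure may not release
      case AcquireRelease:
      case Acquire:                return Acquire;
      case SequentiallyConsistent: return SequentiallyConsistent;
      }
    }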
diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp
index 8a09507c29..d390eedd36 100644
--- a/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -1441,9 +1441,11 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
pushValue(I.getOperand(2), InstID, Vals, VE); // newval.
Vals.push_back(cast<AtomicCmpXchgInst>(I).isVolatile());
Vals.push_back(GetEncodedOrdering(
- cast<AtomicCmpXchgInst>(I).getOrdering()));
+ cast<AtomicCmpXchgInst>(I).getSuccessOrdering()));
Vals.push_back(GetEncodedSynchScope(
cast<AtomicCmpXchgInst>(I).getSynchScope()));
+ Vals.push_back(GetEncodedOrdering(
+ cast<AtomicCmpXchgInst>(I).getFailureOrdering()));
break;
case Instruction::AtomicRMW:
Code = bitc::FUNC_CODE_INST_ATOMICRMW;
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 2dcfbb5b27..6297774484 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -2878,6 +2878,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Node->getOperand(1), Zero, Zero,
cast<AtomicSDNode>(Node)->getMemOperand(),
cast<AtomicSDNode>(Node)->getOrdering(),
+ cast<AtomicSDNode>(Node)->getOrdering(),
cast<AtomicSDNode>(Node)->getSynchScope());
Results.push_back(Swap.getValue(0));
Results.push_back(Swap.getValue(1));
diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index cd7c496375..18b2376b8b 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -193,10 +193,10 @@ SDValue DAGTypeLegalizer::PromoteIntRes_Atomic1(AtomicSDNode *N) {
SDValue DAGTypeLegalizer::PromoteIntRes_Atomic2(AtomicSDNode *N) {
SDValue Op2 = GetPromotedInteger(N->getOperand(2));
SDValue Op3 = GetPromotedInteger(N->getOperand(3));
- SDValue Res = DAG.getAtomic(N->getOpcode(), SDLoc(N),
- N->getMemoryVT(), N->getChain(), N->getBasePtr(),
- Op2, Op3, N->getMemOperand(), N->getOrdering(),
- N->getSynchScope());
+ SDValue Res = DAG.getAtomic(N->getOpcode(), SDLoc(N), N->getMemoryVT(),
+ N->getChain(), N->getBasePtr(), Op2, Op3,
+ N->getMemOperand(), N->getSuccessOrdering(),
+ N->getFailureOrdering(), N->getSynchScope());
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
@@ -2448,6 +2448,7 @@ void DAGTypeLegalizer::ExpandIntRes_ATOMIC_LOAD(SDNode *N,
N->getOperand(1), Zero, Zero,
cast<AtomicSDNode>(N)->getMemOperand(),
cast<AtomicSDNode>(N)->getOrdering(),
+ cast<AtomicSDNode>(N)->getOrdering(),
cast<AtomicSDNode>(N)->getSynchScope());
ReplaceValueWith(SDValue(N, 0), Swap.getValue(0));
ReplaceValueWith(SDValue(N, 1), Swap.getValue(1));
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 9a9062af9d..43a02fe9c7 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -4223,9 +4223,10 @@ SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
- SDVTList VTList, SDValue* Ops, unsigned NumOps,
+ SDVTList VTList, SDValue *Ops, unsigned NumOps,
MachineMemOperand *MMO,
- AtomicOrdering Ordering,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope) {
FoldingSetNodeID ID;
ID.AddInteger(MemVT.getRawBits());
@@ -4247,17 +4248,28 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(),
dl.getDebugLoc(), VTList, MemVT,
Ops, DynOps, NumOps, MMO,
- Ordering, SynchScope);
+ SuccessOrdering, FailureOrdering,
+ SynchScope);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
+ SDVTList VTList, SDValue *Ops, unsigned NumOps,
+ MachineMemOperand *MMO,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope) {
+ return getAtomic(Opcode, dl, MemVT, VTList, Ops, NumOps, MMO, Ordering,
+ Ordering, SynchScope);
+}
+
+SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
SDValue Chain, SDValue Ptr, SDValue Cmp,
SDValue Swp, MachinePointerInfo PtrInfo,
unsigned Alignment,
- AtomicOrdering Ordering,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope) {
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(MemVT);
@@ -4278,14 +4290,15 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO,
- Ordering, SynchScope);
+ SuccessOrdering, FailureOrdering, SynchScope);
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
SDValue Chain,
SDValue Ptr, SDValue Cmp,
SDValue Swp, MachineMemOperand *MMO,
- AtomicOrdering Ordering,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope) {
assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
@@ -4294,7 +4307,8 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
SDVTList VTs = getVTList(VT, MVT::Other);
SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
- return getAtomic(Opcode, dl, MemVT, VTs, Ops, 4, MMO, Ordering, SynchScope);
+ return getAtomic(Opcode, dl, MemVT, VTs, Ops, 4, MMO, SuccessOrdering,
+ FailureOrdering, SynchScope);
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 13daba08ee..b60e7803b7 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3605,14 +3605,15 @@ static SDValue InsertFenceForAtomic(SDValue Chain, AtomicOrdering Order,
void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
SDLoc dl = getCurSDLoc();
- AtomicOrdering Order = I.getOrdering();
+ AtomicOrdering SuccessOrder = I.getSuccessOrdering();
+ AtomicOrdering FailureOrder = I.getFailureOrdering();
SynchronizationScope Scope = I.getSynchScope();
SDValue InChain = getRoot();
const TargetLowering *TLI = TM.getTargetLowering();
if (TLI->getInsertFencesForAtomic())
- InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
+ InChain = InsertFenceForAtomic(InChain, SuccessOrder, Scope, true, dl,
DAG, *TLI);
SDValue L =
@@ -3623,13 +3624,14 @@ void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
getValue(I.getCompareOperand()),
getValue(I.getNewValOperand()),
MachinePointerInfo(I.getPointerOperand()), 0 /* Alignment */,
- TLI->getInsertFencesForAtomic() ? Monotonic : Order,
+ TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder,
+ TLI->getInsertFencesForAtomic() ? Monotonic : FailureOrder,
Scope);
SDValue OutChain = L.getValue(1);
if (TLI->getInsertFencesForAtomic())
- OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
+ OutChain = InsertFenceForAtomic(OutChain, SuccessOrder, Scope, false, dl,
DAG, *TLI);
setValue(&I, L);
diff --git a/lib/IR/AsmWriter.cpp b/lib/IR/AsmWriter.cpp
index c9ea49b222..a528e5f326 100644
--- a/lib/IR/AsmWriter.cpp
+++ b/lib/IR/AsmWriter.cpp
@@ -1226,6 +1226,37 @@ void AssemblyWriter::writeAtomic(AtomicOrdering Ordering,
}
}
+void AssemblyWriter::writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope) {
+ assert(SuccessOrdering != NotAtomic && FailureOrdering != NotAtomic);
+
+ switch (SynchScope) {
+ case SingleThread: Out << " singlethread"; break;
+ case CrossThread: break;
+ }
+
+ switch (SuccessOrdering) {
+ default: Out << " <bad ordering " << int(SuccessOrdering) << ">"; break;
+ case Unordered: Out << " unordered"; break;
+ case Monotonic: Out << " monotonic"; break;
+ case Acquire: Out << " acquire"; break;
+ case Release: Out << " release"; break;
+ case AcquireRelease: Out << " acq_rel"; break;
+ case SequentiallyConsistent: Out << " seq_cst"; break;
+ }
+
+ switch (FailureOrdering) {
+ default: Out << " <bad ordering " << int(FailureOrdering) << ">"; break;
+ case Unordered: Out << " unordered"; break;
+ case Monotonic: Out << " monotonic"; break;
+ case Acquire: Out << " acquire"; break;
+ case Release: Out << " release"; break;
+ case AcquireRelease: Out << " acq_rel"; break;
+ case SequentiallyConsistent: Out << " seq_cst"; break;
+ }
+}
+
void AssemblyWriter::writeParamOperand(const Value *Operand,
AttributeSet Attrs, unsigned Idx) {
if (Operand == 0) {
@@ -2018,7 +2049,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
if (SI->getAlignment())
Out << ", align " << SI->getAlignment();
} else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
- writeAtomic(CXI->getOrdering(), CXI->getSynchScope());
+ writeAtomicCmpXchg(CXI->getSuccessOrdering(), CXI->getFailureOrdering(),
+ CXI->getSynchScope());
} else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
writeAtomic(RMWI->getOrdering(), RMWI->getSynchScope());
} else if (const FenceInst *FI = dyn_cast<FenceInst>(&I)) {
diff --git a/lib/IR/AsmWriter.h b/lib/IR/AsmWriter.h
index 222d3a490f..b4ce6de10d 100644
--- a/lib/IR/AsmWriter.h
+++ b/lib/IR/AsmWriter.h
@@ -90,6 +90,9 @@ public:
void writeOperand(const Value *Op, bool PrintType);
void writeParamOperand(const Value *Operand, AttributeSet Attrs,unsigned Idx);
void writeAtomic(AtomicOrdering Ordering, SynchronizationScope SynchScope);
+ void writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope);
void writeAllMDNodes();
void writeMDNode(unsigned Slot, const MDNode *Node);
diff --git a/lib/IR/Instruction.cpp b/lib/IR/Instruction.cpp
index bd7a62e83d..d31a92e031 100644
--- a/lib/IR/Instruction.cpp
+++ b/lib/IR/Instruction.cpp
@@ -313,7 +313,10 @@ bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
FI->getSynchScope() == cast<FenceInst>(FI)->getSynchScope();
if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(this))
return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I)->isVolatile() &&
- CXI->getOrdering() == cast<AtomicCmpXchgInst>(I)->getOrdering() &&
+ CXI->getSuccessOrdering() ==
+ cast<AtomicCmpXchgInst>(I)->getSuccessOrdering() &&
+ CXI->getFailureOrdering() ==
+ cast<AtomicCmpXchgInst>(I)->getFailureOrdering() &&
CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I)->getSynchScope();
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(this))
return RMWI->getOperation() == cast<AtomicRMWInst>(I)->getOperation() &&
@@ -384,7 +387,10 @@ bool Instruction::isSameOperationAs(const Instruction *I,
FI->getSynchScope() == cast<FenceInst>(I)->getSynchScope();
if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(this))
return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I)->isVolatile() &&
- CXI->getOrdering() == cast<AtomicCmpXchgInst>(I)->getOrdering() &&
+ CXI->getSuccessOrdering() ==
+ cast<AtomicCmpXchgInst>(I)->getSuccessOrdering() &&
+ CXI->getFailureOrdering() ==
+ cast<AtomicCmpXchgInst>(I)->getFailureOrdering() &&
CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I)->getSynchScope();
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(this))
return RMWI->getOperation() == cast<AtomicRMWInst>(I)->getOperation() &&
diff --git a/lib/IR/Instructions.cpp b/lib/IR/Instructions.cpp
index d874411ccd..3aa8413541 100644
--- a/lib/IR/Instructions.cpp
+++ b/lib/IR/Instructions.cpp
@@ -1216,12 +1216,14 @@ void StoreInst::setAlignment(unsigned Align) {
//===----------------------------------------------------------------------===//
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
- AtomicOrdering Ordering,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope) {
Op<0>() = Ptr;
Op<1>() = Cmp;
Op<2>() = NewVal;
- setOrdering(Ordering);
+ setSuccessOrdering(SuccessOrdering);
+ setFailureOrdering(FailureOrdering);
setSynchScope(SynchScope);
assert(getOperand(0) && getOperand(1) && getOperand(2) &&
@@ -1234,30 +1236,38 @@ void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
assert(getOperand(2)->getType() ==
cast<PointerType>(getOperand(0)->getType())->getElementType()
&& "Ptr must be a pointer to NewVal type!");
- assert(Ordering != NotAtomic &&
+ assert(SuccessOrdering != NotAtomic &&
+ "AtomicCmpXchg instructions must be atomic!");
+ assert(FailureOrdering != NotAtomic &&
"AtomicCmpXchg instructions must be atomic!");
+ assert(SuccessOrdering >= FailureOrdering &&
+ "AtomicCmpXchg success ordering must be at least as strong as fail");
+ assert(FailureOrdering != Release && FailureOrdering != AcquireRelease &&
+ "AtomicCmpXchg failure ordering cannot include release semantics");
}
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
- AtomicOrdering Ordering,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope,
Instruction *InsertBefore)
: Instruction(Cmp->getType(), AtomicCmpXchg,
OperandTraits<AtomicCmpXchgInst>::op_begin(this),
OperandTraits<AtomicCmpXchgInst>::operands(this),
InsertBefore) {
- Init(Ptr, Cmp, NewVal, Ordering, SynchScope);
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
}
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
- AtomicOrdering Ordering,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd)
: Instruction(Cmp->getType(), AtomicCmpXchg,
OperandTraits<AtomicCmpXchgInst>::op_begin(this),
OperandTraits<AtomicCmpXchgInst>::operands(this),
InsertAtEnd) {
- Init(Ptr, Cmp, NewVal, Ordering, SynchScope);
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
}
//===----------------------------------------------------------------------===//
@@ -3596,7 +3606,8 @@ StoreInst *StoreInst::clone_impl() const {
AtomicCmpXchgInst *AtomicCmpXchgInst::clone_impl() const {
AtomicCmpXchgInst *Result =
new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
- getOrdering(), getSynchScope());
+ getSuccessOrdering(), getFailureOrdering(),
+ getSynchScope());
Result->setVolatile(isVolatile());
return Result;
}
diff --git a/lib/IR/Verifier.cpp b/lib/IR/Verifier.cpp
index 158601226a..4bdc1c13d0 100644
--- a/lib/IR/Verifier.cpp
+++ b/lib/IR/Verifier.cpp
@@ -1829,10 +1829,23 @@ void Verifier::visitAllocaInst(AllocaInst &AI) {
}
void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
- Assert1(CXI.getOrdering() != NotAtomic,
+
+ // FIXME: more conditions???
+ Assert1(CXI.getSuccessOrdering() != NotAtomic,
+ "cmpxchg instructions must be atomic.", &CXI);
+ Assert1(CXI.getFailureOrdering() != NotAtomic,
"cmpxchg instructions must be atomic.", &CXI);
- Assert1(CXI.getOrdering() != Unordered,
+ Assert1(CXI.getSuccessOrdering() != Unordered,
"cmpxchg instructions cannot be unordered.", &CXI);
+ Assert1(CXI.getFailureOrdering() != Unordered,
+ "cmpxchg instructions cannot be unordered.", &CXI);
+ Assert1(CXI.getSuccessOrdering() >= CXI.getFailureOrdering(),
+ "cmpxchg instructions be at least as constrained on success as fail",
+ &CXI);
+ Assert1(CXI.getFailureOrdering() != Release &&
+ CXI.getFailureOrdering() != AcquireRelease,
+ "cmpxchg failure ordering cannot include release semantics", &CXI);
+
PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
Assert1(PTy, "First cmpxchg operand must be a pointer.", &CXI);
Type *ElTy = PTy->getElementType();
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 691961ef2a..aa7ca7f91f 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -6054,10 +6054,10 @@ ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results,
Node->getOperand(i), DAG.getIntPtrConstant(1)));
}
SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
- SDValue Result =
- DAG.getAtomic(Node->getOpcode(), dl, MVT::i64, Tys, Ops.data(), Ops.size(),
- cast<MemSDNode>(Node)->getMemOperand(), AN->getOrdering(),
- AN->getSynchScope());
+ SDValue Result = DAG.getAtomic(
+ Node->getOpcode(), dl, MVT::i64, Tys, Ops.data(), Ops.size(),
+ cast<MemSDNode>(Node)->getMemOperand(), AN->getSuccessOrdering(),
+ AN->getFailureOrdering(), AN->getSynchScope());
SDValue OpsF[] = { Result.getValue(0), Result.getValue(1) };
Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
Results.push_back(Result.getValue(2));
diff --git a/lib/Target/CppBackend/CPPBackend.cpp b/lib/Target/CppBackend/CPPBackend.cpp
index b13709914b..31585d9296 100644
--- a/lib/Target/CppBackend/CPPBackend.cpp
+++ b/lib/Target/CppBackend/CPPBackend.cpp
@@ -1567,12 +1567,16 @@ void CppWriter::printInstruction(const Instruction *I,
}
case Instruction::AtomicCmpXchg: {
const AtomicCmpXchgInst *cxi = cast<AtomicCmpXchgInst>(I);
- StringRef Ordering = ConvertAtomicOrdering(cxi->getOrdering());
+ StringRef SuccessOrdering =
+ ConvertAtomicOrdering(cxi->getSuccessOrdering());
+ StringRef FailureOrdering =
+ ConvertAtomicOrdering(cxi->getFailureOrdering());
StringRef CrossThread = ConvertAtomicSynchScope(cxi->getSynchScope());
Out << "AtomicCmpXchgInst* " << iName
<< " = new AtomicCmpXchgInst("
<< opNames[0] << ", " << opNames[1] << ", " << opNames[2] << ", "
- << Ordering << ", " << CrossThread << ", " << bbname
+ << SuccessOrdering << ", " << FailureOrdering << ", "
+ << CrossThread << ", " << bbname
<< ");";
nl(Out) << iName << "->setName(\"";
printEscapedString(cxi->getName());
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 07c83e8b29..f651205bf3 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -13805,6 +13805,7 @@ static void ReplaceATOMIC_LOAD(SDNode *Node,
Node->getOperand(1), Zero, Zero,
cast<AtomicSDNode>(Node)->getMemOperand(),
cast<AtomicSDNode>(Node)->getOrdering(),
+ cast<AtomicSDNode>(Node)->getOrdering(),
cast<AtomicSDNode>(Node)->getSynchScope());
Results.push_back(Swap.getValue(0));
Results.push_back(Swap.getValue(1));
diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp
index 64f63773f6..ff16b1ecdf 100644
--- a/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/lib/Transforms/IPO/MergeFunctions.cpp
@@ -341,7 +341,10 @@ bool FunctionComparator::isEquivalentOperation(const Instruction *I1,
FI->getSynchScope() == cast<FenceInst>(I2)->getSynchScope();
if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
- CXI->getOrdering() == cast<AtomicCmpXchgInst>(I2)->getOrdering() &&
+ CXI->getSuccessOrdering() ==
+ cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
+ CXI->getFailureOrdering() ==
+ cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I2)->getSynchScope();
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index b4ae443475..b158f1f10a 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1088,7 +1088,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
handleCASOrRMW(I);
- I.setOrdering(addReleaseOrdering(I.getOrdering()));
+ I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
}
// Vector manipulation.
diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index fed7508dbc..5ffb17cbf3 100644
--- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -446,21 +446,6 @@ static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
return IRB->getInt32(v);
}
-static ConstantInt *createFailOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
- uint32_t v = 0;
- switch (ord) {
- case NotAtomic: assert(false);
- case Unordered: // Fall-through.
- case Monotonic: v = 0; break;
- // case Consume: v = 1; break; // Not specified yet.
- case Acquire: v = 2; break;
- case Release: v = 0; break;
- case AcquireRelease: v = 2; break;
- case SequentiallyConsistent: v = 5; break;
- }
- return IRB->getInt32(v);
-}
-
// If a memset intrinsic gets inlined by the code gen, we will miss races on it.
// So, we either need to ensure the intrinsic is not inlined, or instrument it.
// We do not instrument memset/memmove/memcpy intrinsics (too complicated),
@@ -556,8 +541,8 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false),
IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false),
- createOrdering(&IRB, CASI->getOrdering()),
- createFailOrdering(&IRB, CASI->getOrdering())};
+ createOrdering(&IRB, CASI->getSuccessOrdering()),
+ createOrdering(&IRB, CASI->getFailureOrdering())};
CallInst *C = CallInst::Create(TsanAtomicCAS[Idx], ArrayRef<Value*>(Args));
ReplaceInstWithInst(I, C);
} else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
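Since the instruction now carries a real failure ordering, the one createOrdering helper serves both runtime arguments and the hard-coded createFailOrdering table above becomes dead. A hedged sketch of the call this instrumentation produces for an i32 cmpxchg, assuming the __tsan_atomic32_compare_exchange_val entry point from compiler-rt's tsan_interface_atomic.h and the morder encoding implied by the deleted helper (relaxed=0, acquire=2, seq_cst=5):

    // Declaration assumed from compiler-rt; parameter names are illustrative.
    extern "C" int __tsan_atomic32_compare_exchange_val(
        volatile int *Addr, int Expected, int Desired, int Mo, int Fmo);

    int demo(volatile int *Addr) {
      // For "cmpxchg ... acquire monotonic": Mo = 2 (acquire), Fmo = 0 (relaxed).
      return __tsan_atomic32_compare_exchange_val(Addr, 42, 3, /*Mo=*/2, /*Fmo=*/0);
    }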