author    Tim Northover <tnorthover@apple.com>  2014-03-11 10:48:52 +0000
committer Tim Northover <tnorthover@apple.com>  2014-03-11 10:48:52 +0000
commit    ca396e391e13d417605ebed06780d92c88f14a6b (patch)
tree      a984bbd50ef52c0ad3f0fd66b81d1021b072d16c /lib/IR
parent    fb411c8b8ce42689d42b0d8e3a871d81f86a7298 (diff)
IR: add a second ordering operand to cmpxchg for failure
The syntax for "cmpxchg" should now look something like:

    cmpxchg i32* %addr, i32 42, i32 3 acquire monotonic

where the second ordering argument gives the required semantics in the case that no exchange takes place. It should be no stronger than the first ordering constraint and cannot be either "release" or "acq_rel" (since no store will have taken place).

rdar://problem/15996804

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@203559 91177308-0d34-0410-b5e6-96231b3b80d8
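For illustration, a minimal C++ sketch of building such an instruction through the two-ordering AtomicCmpXchgInst constructor introduced in the Instructions.cpp hunk below; the wrapper function and its operands are hypothetical:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch only: mirrors the textual example above -- success ordering
// "acquire", failure ordering "monotonic", default cross-thread scope.
static AtomicCmpXchgInst *emitCmpXchg(Value *Addr, Value *Expected,
                                      Value *Desired,
                                      Instruction *InsertBefore) {
  return new AtomicCmpXchgInst(Addr, Expected, Desired, Acquire, Monotonic,
                               CrossThread, InsertBefore);
}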
Diffstat (limited to 'lib/IR')
-rw-r--r--  lib/IR/AsmWriter.cpp     34
-rw-r--r--  lib/IR/AsmWriter.h        3
-rw-r--r--  lib/IR/Instruction.cpp   10
-rw-r--r--  lib/IR/Instructions.cpp  27
-rw-r--r--  lib/IR/Verifier.cpp      17
5 files changed, 78 insertions, 13 deletions
diff --git a/lib/IR/AsmWriter.cpp b/lib/IR/AsmWriter.cpp
index c9ea49b222..a528e5f326 100644
--- a/lib/IR/AsmWriter.cpp
+++ b/lib/IR/AsmWriter.cpp
@@ -1226,6 +1226,37 @@ void AssemblyWriter::writeAtomic(AtomicOrdering Ordering,
}
}
+void AssemblyWriter::writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope) {
+ assert(SuccessOrdering != NotAtomic && FailureOrdering != NotAtomic);
+
+ switch (SynchScope) {
+ case SingleThread: Out << " singlethread"; break;
+ case CrossThread: break;
+ }
+
+ switch (SuccessOrdering) {
+ default: Out << " <bad ordering " << int(SuccessOrdering) << ">"; break;
+ case Unordered: Out << " unordered"; break;
+ case Monotonic: Out << " monotonic"; break;
+ case Acquire: Out << " acquire"; break;
+ case Release: Out << " release"; break;
+ case AcquireRelease: Out << " acq_rel"; break;
+ case SequentiallyConsistent: Out << " seq_cst"; break;
+ }
+
+ switch (FailureOrdering) {
+ default: Out << " <bad ordering " << int(FailureOrdering) << ">"; break;
+ case Unordered: Out << " unordered"; break;
+ case Monotonic: Out << " monotonic"; break;
+ case Acquire: Out << " acquire"; break;
+ case Release: Out << " release"; break;
+ case AcquireRelease: Out << " acq_rel"; break;
+ case SequentiallyConsistent: Out << " seq_cst"; break;
+ }
+}
+
void AssemblyWriter::writeParamOperand(const Value *Operand,
AttributeSet Attrs, unsigned Idx) {
if (Operand == 0) {
@@ -2018,7 +2049,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
if (SI->getAlignment())
Out << ", align " << SI->getAlignment();
} else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
- writeAtomic(CXI->getOrdering(), CXI->getSynchScope());
+ writeAtomicCmpXchg(CXI->getSuccessOrdering(), CXI->getFailureOrdering(),
+ CXI->getSynchScope());
} else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
writeAtomic(RMWI->getOrdering(), RMWI->getSynchScope());
} else if (const FenceInst *FI = dyn_cast<FenceInst>(&I)) {
diff --git a/lib/IR/AsmWriter.h b/lib/IR/AsmWriter.h
index 222d3a490f..b4ce6de10d 100644
--- a/lib/IR/AsmWriter.h
+++ b/lib/IR/AsmWriter.h
@@ -90,6 +90,9 @@ public:
void writeOperand(const Value *Op, bool PrintType);
void writeParamOperand(const Value *Operand, AttributeSet Attrs,unsigned Idx);
void writeAtomic(AtomicOrdering Ordering, SynchronizationScope SynchScope);
+ void writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope);
void writeAllMDNodes();
void writeMDNode(unsigned Slot, const MDNode *Node);
diff --git a/lib/IR/Instruction.cpp b/lib/IR/Instruction.cpp
index bd7a62e83d..d31a92e031 100644
--- a/lib/IR/Instruction.cpp
+++ b/lib/IR/Instruction.cpp
@@ -313,7 +313,10 @@ bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
FI->getSynchScope() == cast<FenceInst>(FI)->getSynchScope();
if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(this))
return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I)->isVolatile() &&
- CXI->getOrdering() == cast<AtomicCmpXchgInst>(I)->getOrdering() &&
+ CXI->getSuccessOrdering() ==
+ cast<AtomicCmpXchgInst>(I)->getSuccessOrdering() &&
+ CXI->getFailureOrdering() ==
+ cast<AtomicCmpXchgInst>(I)->getFailureOrdering() &&
CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I)->getSynchScope();
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(this))
return RMWI->getOperation() == cast<AtomicRMWInst>(I)->getOperation() &&
@@ -384,7 +387,10 @@ bool Instruction::isSameOperationAs(const Instruction *I,
FI->getSynchScope() == cast<FenceInst>(I)->getSynchScope();
if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(this))
return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I)->isVolatile() &&
- CXI->getOrdering() == cast<AtomicCmpXchgInst>(I)->getOrdering() &&
+ CXI->getSuccessOrdering() ==
+ cast<AtomicCmpXchgInst>(I)->getSuccessOrdering() &&
+ CXI->getFailureOrdering() ==
+ cast<AtomicCmpXchgInst>(I)->getFailureOrdering() &&
CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I)->getSynchScope();
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(this))
return RMWI->getOperation() == cast<AtomicRMWInst>(I)->getOperation() &&
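With both orderings now part of the identity checks above, two cmpxchg instructions that differ only in their failure ordering no longer compare as identical. A small sketch under that assumption, using only calls that already exist (clone, setFailureOrdering, isIdenticalToWhenDefined); the helper name is invented:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: clone CXI, weaken only the failure ordering, and observe that the
// two instructions are no longer considered identical.
static bool failureOrderingIsCompared(const AtomicCmpXchgInst *CXI) {
  AtomicCmpXchgInst *Copy = cast<AtomicCmpXchgInst>(CXI->clone());
  Copy->setFailureOrdering(Monotonic); // assumes CXI was e.g. seq_cst/seq_cst
  bool Identical = CXI->isIdenticalToWhenDefined(Copy);
  delete Copy; // the clone was never inserted into a basic block
  return !Identical;
}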
diff --git a/lib/IR/Instructions.cpp b/lib/IR/Instructions.cpp
index d874411ccd..3aa8413541 100644
--- a/lib/IR/Instructions.cpp
+++ b/lib/IR/Instructions.cpp
@@ -1216,12 +1216,14 @@ void StoreInst::setAlignment(unsigned Align) {
//===----------------------------------------------------------------------===//
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
- AtomicOrdering Ordering,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope) {
Op<0>() = Ptr;
Op<1>() = Cmp;
Op<2>() = NewVal;
- setOrdering(Ordering);
+ setSuccessOrdering(SuccessOrdering);
+ setFailureOrdering(FailureOrdering);
setSynchScope(SynchScope);
assert(getOperand(0) && getOperand(1) && getOperand(2) &&
@@ -1234,30 +1236,38 @@ void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
assert(getOperand(2)->getType() ==
cast<PointerType>(getOperand(0)->getType())->getElementType()
&& "Ptr must be a pointer to NewVal type!");
- assert(Ordering != NotAtomic &&
+ assert(SuccessOrdering != NotAtomic &&
+ "AtomicCmpXchg instructions must be atomic!");
+ assert(FailureOrdering != NotAtomic &&
"AtomicCmpXchg instructions must be atomic!");
+ assert(SuccessOrdering >= FailureOrdering &&
+ "AtomicCmpXchg success ordering must be at least as strong as fail");
+ assert(FailureOrdering != Release && FailureOrdering != AcquireRelease &&
+ "AtomicCmpXchg failure ordering cannot include release semantics");
}
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
- AtomicOrdering Ordering,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope,
Instruction *InsertBefore)
: Instruction(Cmp->getType(), AtomicCmpXchg,
OperandTraits<AtomicCmpXchgInst>::op_begin(this),
OperandTraits<AtomicCmpXchgInst>::operands(this),
InsertBefore) {
- Init(Ptr, Cmp, NewVal, Ordering, SynchScope);
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
}
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
- AtomicOrdering Ordering,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd)
: Instruction(Cmp->getType(), AtomicCmpXchg,
OperandTraits<AtomicCmpXchgInst>::op_begin(this),
OperandTraits<AtomicCmpXchgInst>::operands(this),
InsertAtEnd) {
- Init(Ptr, Cmp, NewVal, Ordering, SynchScope);
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
}
//===----------------------------------------------------------------------===//
@@ -3596,7 +3606,8 @@ StoreInst *StoreInst::clone_impl() const {
AtomicCmpXchgInst *AtomicCmpXchgInst::clone_impl() const {
AtomicCmpXchgInst *Result =
new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
- getOrdering(), getSynchScope());
+ getSuccessOrdering(), getFailureOrdering(),
+ getSynchScope());
Result->setVolatile(isVolatile());
return Result;
}
diff --git a/lib/IR/Verifier.cpp b/lib/IR/Verifier.cpp
index 158601226a..4bdc1c13d0 100644
--- a/lib/IR/Verifier.cpp
+++ b/lib/IR/Verifier.cpp
@@ -1829,10 +1829,23 @@ void Verifier::visitAllocaInst(AllocaInst &AI) {
}
void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
- Assert1(CXI.getOrdering() != NotAtomic,
+
+ // FIXME: more conditions???
+ Assert1(CXI.getSuccessOrdering() != NotAtomic,
+ "cmpxchg instructions must be atomic.", &CXI);
+ Assert1(CXI.getFailureOrdering() != NotAtomic,
"cmpxchg instructions must be atomic.", &CXI);
- Assert1(CXI.getOrdering() != Unordered,
+ Assert1(CXI.getSuccessOrdering() != Unordered,
"cmpxchg instructions cannot be unordered.", &CXI);
+ Assert1(CXI.getFailureOrdering() != Unordered,
+ "cmpxchg instructions cannot be unordered.", &CXI);
+ Assert1(CXI.getSuccessOrdering() >= CXI.getFailureOrdering(),
+ "cmpxchg instructions be at least as constrained on success as fail",
+ &CXI);
+ Assert1(CXI.getFailureOrdering() != Release &&
+ CXI.getFailureOrdering() != AcquireRelease,
+ "cmpxchg failure ordering cannot include release semantics", &CXI);
+
PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
Assert1(PTy, "First cmpxchg operand must be a pointer.", &CXI);
Type *ElTy = PTy->getElementType();
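Taken together, the new verifier checks reduce to a small predicate over the two orderings. A standalone sketch of the same rules (the helper name is invented; like the Assert1 calls above, the >= comparison relies on the enum values increasing with strength):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: mirrors the cmpxchg ordering checks added to the verifier above.
static bool isValidCmpXchgOrderingPair(AtomicOrdering Success,
                                       AtomicOrdering Failure) {
  return Success != NotAtomic && Failure != NotAtomic &&   // must be atomic
         Success != Unordered && Failure != Unordered &&   // and ordered
         Success >= Failure &&              // success at least as constrained
         Failure != Release && Failure != AcquireRelease;  // no release part
}

// e.g. isValidCmpXchgOrderingPair(Acquire, Monotonic)  -> true
//      isValidCmpXchgOrderingPair(Monotonic, Acquire)  -> false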