author     Eli Friedman <eli.friedman@gmail.com>  2011-07-28 21:48:00 +0000
committer  Eli Friedman <eli.friedman@gmail.com>  2011-07-28 21:48:00 +0000
commit     ff03048c1350fcc4fda1ef6d6c57252f3a950854 (patch)
tree       b2acebff9cd7d5210b5f461d00ceb7d95db32550 /lib/VMCore/AsmWriter.cpp
parent     7f1cce55b5da7ef53269426e3c64586177384dca (diff)
LangRef and basic memory-representation/reading/writing for 'cmpxchg' and
'atomicrmw' instructions, which allow representing all the current atomic
rmw intrinsics.

The allowed operands for these instructions are heavily restricted at the
moment; we can probably loosen the restrictions a bit, but supporting general
first-class types (where it makes sense) might get complicated, given how
SelectionDAG works.

As an initial cut, these operations do not support specifying an alignment,
but it would be possible to add one if we think it's useful. Specifying an
alignment lower than the natural alignment would be essentially impossible
to support on anything other than x86, but specifying a greater alignment
would be possible. I can't think of any useful optimizations which would use
that information, but maybe someone else has ideas.

Optimizer/codegen support coming soon.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@136404 91177308-0d34-0410-b5e6-96231b3b80d8
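For reference, the textual forms this printer support emits correspond to the
LangRef half of the commit. A minimal sketch of the two instructions (the
value names %ptr, %cmp, %new, %v and the chosen orderings are illustrative,
not taken from this patch):

    %old  = atomicrmw add i32* %ptr, i32 1 seq_cst
    %val  = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel
    %prev = atomicrmw volatile xchg i32* %ptr, i32 %v singlethread monotonic

The trailing keywords (an optional 'singlethread' scope followed by the
ordering) are exactly what the writeAtomic calls in the diff below append.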
Diffstat (limited to 'lib/VMCore/AsmWriter.cpp')
-rw-r--r--  lib/VMCore/AsmWriter.cpp  25
1 file changed, 25 insertions, 0 deletions
diff --git a/lib/VMCore/AsmWriter.cpp b/lib/VMCore/AsmWriter.cpp
index e6cd418c32..e3e2484def 100644
--- a/lib/VMCore/AsmWriter.cpp
+++ b/lib/VMCore/AsmWriter.cpp
@@ -658,6 +658,23 @@ static const char *getPredicateText(unsigned predicate) {
   return pred;
 }
 
+static void writeAtomicRMWOperation(raw_ostream &Out,
+                                    AtomicRMWInst::BinOp Op) {
+  switch (Op) {
+  default: Out << " <unknown operation " << Op << ">"; break;
+  case AtomicRMWInst::Xchg: Out << " xchg"; break;
+  case AtomicRMWInst::Add:  Out << " add"; break;
+  case AtomicRMWInst::Sub:  Out << " sub"; break;
+  case AtomicRMWInst::And:  Out << " and"; break;
+  case AtomicRMWInst::Nand: Out << " nand"; break;
+  case AtomicRMWInst::Or:   Out << " or"; break;
+  case AtomicRMWInst::Xor:  Out << " xor"; break;
+  case AtomicRMWInst::Max:  Out << " max"; break;
+  case AtomicRMWInst::Min:  Out << " min"; break;
+  case AtomicRMWInst::UMax: Out << " umax"; break;
+  case AtomicRMWInst::UMin: Out << " umin"; break;
+  }
+}
+
 static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
   if (const OverflowingBinaryOperator *OBO =
@@ -1670,6 +1687,10 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
   if (const CmpInst *CI = dyn_cast<CmpInst>(&I))
     Out << ' ' << getPredicateText(CI->getPredicate());
 
+  // Print out the atomicrmw operation
+  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I))
+    writeAtomicRMWOperation(Out, RMWI->getOperation());
+
   // Print out the type of the operands...
   const Value *Operand = I.getNumOperands() ? I.getOperand(0) : 0;
@@ -1936,6 +1957,10 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Out << ", align " << cast<LoadInst>(I).getAlignment();
} else if (isa<StoreInst>(I) && cast<StoreInst>(I).getAlignment()) {
Out << ", align " << cast<StoreInst>(I).getAlignment();
+ } else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
+ writeAtomic(CXI->getOrdering(), CXI->getSynchScope());
+ } else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
+ writeAtomic(RMWI->getOrdering(), RMWI->getSynchScope());
} else if (const FenceInst *FI = dyn_cast<FenceInst>(&I)) {
writeAtomic(FI->getOrdering(), FI->getSynchScope());
}
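The writeAtomic helper called above is not part of this hunk; it predates
this patch (it came in with 'fence' support). As a rough sketch of what those
calls rely on, assuming the AtomicOrdering and SynchronizationScope enums of
this era, it looks something like:

    // Sketch, not the verbatim helper: prints the optional scope and
    // ordering keywords that trail cmpxchg/atomicrmw/fence operands.
    void AssemblyWriter::writeAtomic(AtomicOrdering Ordering,
                                     SynchronizationScope SynchScope) {
      if (Ordering == NotAtomic)
        return;
      if (SynchScope == SingleThread)
        Out << " singlethread";  // CrossThread is the default; prints nothing
      switch (Ordering) {
      default:                     Out << " <bad ordering>"; break;
      case Unordered:              Out << " unordered"; break;
      case Monotonic:              Out << " monotonic"; break;
      case Acquire:                Out << " acquire"; break;
      case Release:                Out << " release"; break;
      case AcquireRelease:         Out << " acq_rel"; break;
      case SequentiallyConsistent: Out << " seq_cst"; break;
      }
    }

so a printed 'atomicrmw add i32* %p, i32 1 seq_cst' picks up its final
keywords from this helper.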