author    Eli Friedman <eli.friedman@gmail.com>  2011-08-09 23:02:53 +0000
committer Eli Friedman <eli.friedman@gmail.com>  2011-08-09 23:02:53 +0000
commit 21006d40ac9ec7715bca2095451075a83773dc52 (patch)
tree   a3015838890edfbb23387b5a3dcacb28a51b6e96 /lib/VMCore/AsmWriter.cpp
parent 51c9805c4bcca635bc6a854e4a246ebd4258f512 (diff)
Representation of 'atomic load' and 'atomic store' in IR.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@137170 91177308-0d34-0410-b5e6-96231b3b80d8
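
For illustration only (not part of the commit): given the printing order in this patch -- the "atomic " marker, then "volatile " if set, then the opcode, with the ordering and alignment appended after the operands -- an atomic load and store would print roughly as below, in the typed-pointer IR syntax of that era. The exact ordering/scope keywords come from writeAtomic, which is not shown in this diff.

    %val = atomic load i32* %ptr seq_cst, align 4
    atomic store i32 %val, i32* %ptr release, align 4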
Diffstat (limited to 'lib/VMCore/AsmWriter.cpp')
-rw-r--r--  lib/VMCore/AsmWriter.cpp  |  30
1 file changed, 20 insertions, 10 deletions
diff --git a/lib/VMCore/AsmWriter.cpp b/lib/VMCore/AsmWriter.cpp
index 442e8b8f7f..005f51aae3 100644
--- a/lib/VMCore/AsmWriter.cpp
+++ b/lib/VMCore/AsmWriter.cpp
@@ -1659,14 +1659,18 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Out << '%' << SlotNum << " = ";
}
+ // If this is an atomic load or store, print out the atomic marker.
+ if ((isa<LoadInst>(I) && cast<LoadInst>(I).isAtomic()) ||
+ (isa<StoreInst>(I) && cast<StoreInst>(I).isAtomic()))
+ Out << "atomic ";
+
// If this is a volatile load or store, print out the volatile marker.
if ((isa<LoadInst>(I) && cast<LoadInst>(I).isVolatile()) ||
- (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile())) {
- Out << "volatile ";
- } else if (isa<CallInst>(I) && cast<CallInst>(I).isTailCall()) {
- // If this is a call, check if it's a tail call.
+ (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile()))
+ Out << "volatile ";
+
+ if (isa<CallInst>(I) && cast<CallInst>(I).isTailCall())
Out << "tail ";
- }
// Print out the opcode...
Out << I.getOpcodeName();
@@ -1913,11 +1917,17 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
}
}
- // Print post operand alignment for load/store.
- if (isa<LoadInst>(I) && cast<LoadInst>(I).getAlignment()) {
- Out << ", align " << cast<LoadInst>(I).getAlignment();
- } else if (isa<StoreInst>(I) && cast<StoreInst>(I).getAlignment()) {
- Out << ", align " << cast<StoreInst>(I).getAlignment();
+ // Print atomic ordering/alignment for memory operations
+ if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
+ if (LI->isAtomic())
+ writeAtomic(LI->getOrdering(), LI->getSynchScope());
+ if (LI->getAlignment())
+ Out << ", align " << LI->getAlignment();
+ } else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
+ if (SI->isAtomic())
+ writeAtomic(SI->getOrdering(), SI->getSynchScope());
+ if (SI->getAlignment())
+ Out << ", align " << SI->getAlignment();
} else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
writeAtomic(CXI->getOrdering(), CXI->getSynchScope());
} else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {