author     Eli Friedman <eli.friedman@gmail.com>    2011-08-12 22:50:01 +0000
committer  Eli Friedman <eli.friedman@gmail.com>    2011-08-12 22:50:01 +0000
commit     f03bb260c90ad013aa4e55af36382875011c95b8 (patch)
tree       6d554ebcc06bd6d3509a7808029994c894d002d3
parent     10342123adec62151bf9060493dd13583c67ae52 (diff)
Move "atomic" and "volatile" designations on instructions after the opcode
of the instruction. Note that this change affects the existing non-atomic load and store instructions; the parser now accepts both forms, and the change is noted in the release notes. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@137527 91177308-0d34-0410-b5e6-96231b3b80d8
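For illustration, a minimal sketch of the placement change in the LLVM IR of this era (typed pointers; the names %v, %a, %p, %q are hypothetical and not taken from the patch):

    ; old placement (still accepted by the parser, now deprecated)
    %v = volatile load i32* %p, align 4
    volatile store i32 %v, i32* %q, align 4

    ; new canonical placement, as printed by the AsmWriter
    %v = load volatile i32* %p, align 4
    store volatile i32 %v, i32* %q, align 4

    ; atomic operations likewise put their markers after the opcode
    %a = load atomic volatile i32* %p seq_cst, align 4
    store atomic i32 %a, i32* %q release, align 4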
Diffstat:
-rw-r--r--  docs/LangRef.html                                        | 12
-rw-r--r--  docs/ReleaseNotes.html                                   |  4
-rw-r--r--  lib/AsmParser/LLParser.cpp                               | 99
-rw-r--r--  lib/AsmParser/LLParser.h                                 | 10
-rw-r--r--  lib/VMCore/AsmWriter.cpp                                 | 22
-rw-r--r--  test/Assembler/atomic.ll                                 | 26
-rw-r--r--  test/Transforms/DeadArgElim/deadexternal.ll              |  2
-rw-r--r--  test/Transforms/DeadStoreElimination/simple.ll           |  8
-rw-r--r--  test/Transforms/EarlyCSE/basic.ll                        | 10
-rw-r--r--  test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll   |  2
-rw-r--r--  test/Transforms/InstCombine/2008-04-28-VolatileStore.ll  |  2
-rw-r--r--  test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll |  2
-rw-r--r--  test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll     |  2
-rw-r--r--  test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll     |  2
-rw-r--r--  test/Transforms/InstCombine/extractvalue.ll              |  2
-rw-r--r--  test/Transforms/InstCombine/intrinsics.ll                | 12
-rw-r--r--  test/Transforms/InstCombine/volatile_store.ll            |  4
-rw-r--r--  test/Transforms/JumpThreading/no-irreducible-loops.ll    |  2
-rw-r--r--  test/Transforms/LICM/2007-05-22-VolatileSink.ll          |  2
-rw-r--r--  test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll  |  2
-rw-r--r--  test/Transforms/LICM/scalar_promote.ll                   |  2
-rw-r--r--  test/Transforms/ObjCARC/contract-storestrong.ll          |  4
-rw-r--r--  test/Transforms/ScalarRepl/volatile.ll                   |  4
-rw-r--r--  test/Transforms/SimplifyCFG/trapping-load-unreachable.ll |  8
-rw-r--r--  test/Transforms/SimplifyLibCalls/memcmp.ll               | 22

25 files changed, 166 insertions(+), 101 deletions(-)
diff --git a/docs/LangRef.html b/docs/LangRef.html
index 725691c14f..95cbad06f6 100644
--- a/docs/LangRef.html
+++ b/docs/LangRef.html
@@ -4572,8 +4572,8 @@ that the invoke/unwind semantics are likely to change in future versions.</p>
<h5>Syntax:</h5>
<pre>
- &lt;result&gt; = [volatile] load &lt;ty&gt;* &lt;pointer&gt;[, align &lt;alignment&gt;][, !nontemporal !&lt;index&gt;]
- &lt;result&gt; = atomic [volatile] load &lt;ty&gt;* &lt;pointer&gt; [singlethread] &lt;ordering&gt;, align &lt;alignment&gt;
+ &lt;result&gt; = load [volatile] &lt;ty&gt;* &lt;pointer&gt;[, align &lt;alignment&gt;][, !nontemporal !&lt;index&gt;]
+ &lt;result&gt; = load atomic [volatile] &lt;ty&gt;* &lt;pointer&gt; [singlethread] &lt;ordering&gt;, align &lt;alignment&gt;
!&lt;index&gt; = !{ i32 1 }
</pre>
@@ -4644,8 +4644,8 @@ that the invoke/unwind semantics are likely to change in future versions.</p>
<h5>Syntax:</h5>
<pre>
- [volatile] store &lt;ty&gt; &lt;value&gt;, &lt;ty&gt;* &lt;pointer&gt;[, align &lt;alignment&gt;][, !nontemporal !&lt;index&gt;] <i>; yields {void}</i>
- atomic [volatile] store &lt;ty&gt; &lt;value&gt;, &lt;ty&gt;* &lt;pointer&gt; [singlethread] &lt;ordering&gt;, align &lt;alignment&gt; <i>; yields {void}</i>
+ store [volatile] &lt;ty&gt; &lt;value&gt;, &lt;ty&gt;* &lt;pointer&gt;[, align &lt;alignment&gt;][, !nontemporal !&lt;index&gt;] <i>; yields {void}</i>
+ store atomic [volatile] &lt;ty&gt; &lt;value&gt;, &lt;ty&gt;* &lt;pointer&gt; [singlethread] &lt;ordering&gt;, align &lt;alignment&gt; <i>; yields {void}</i>
</pre>
<h5>Overview:</h5>
@@ -4774,7 +4774,7 @@ thread. (This is useful for interacting with signal handlers.)</p>
<h5>Syntax:</h5>
<pre>
- [volatile] cmpxchg &lt;ty&gt;* &lt;pointer&gt;, &lt;ty&gt; &lt;cmp&gt;, &lt;ty&gt; &lt;new&gt; [singlethread] &lt;ordering&gt; <i>; yields {ty}</i>
+ cmpxchg [volatile] &lt;ty&gt;* &lt;pointer&gt;, &lt;ty&gt; &lt;cmp&gt;, &lt;ty&gt; &lt;new&gt; [singlethread] &lt;ordering&gt; <i>; yields {ty}</i>
</pre>
<h5>Overview:</h5>
@@ -4857,7 +4857,7 @@ done:
<h5>Syntax:</h5>
<pre>
- [volatile] atomicrmw &lt;operation&gt; &lt;ty&gt;* &lt;pointer&gt;, &lt;ty&gt; &lt;value&gt; [singlethread] &lt;ordering&gt; <i>; yields {ty}</i>
+ atomicrmw [volatile] &lt;operation&gt; &lt;ty&gt;* &lt;pointer&gt;, &lt;ty&gt; &lt;value&gt; [singlethread] &lt;ordering&gt; <i>; yields {ty}</i>
</pre>
<h5>Overview:</h5>
diff --git a/docs/ReleaseNotes.html b/docs/ReleaseNotes.html
index 726729aebf..258555496c 100644
--- a/docs/ReleaseNotes.html
+++ b/docs/ReleaseNotes.html
@@ -583,6 +583,10 @@ it run faster:</p>
<ul>
<li>The <code>LowerSetJmp</code> pass wasn't used effectively by any
target and has been removed.</li>
+ <li>The syntax of volatile loads and stores in IR has been changed to
+ "<code>load volatile</code>"/"<code>store volatile</code>". The old
+ syntax ("<code>volatile load</code>"/"<code>volatile store</code>")
+ is still accepted, but is now considered deprecated.</li>
</ul>
</div>
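As a quick sketch of the compatibility path described in the release note above (operand names are hypothetical): the old spelling still parses, but the assembly writer re-emits the canonical order, so

    volatile store i32 10, i32* %i, align 4

round-trips through opt -S as

    store volatile i32 10, i32* %i, align 4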
diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp
index a5412a6764..c865afd915 100644
--- a/lib/AsmParser/LLParser.cpp
+++ b/lib/AsmParser/LLParser.cpp
@@ -2950,27 +2950,17 @@ int LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB,
case lltok::kw_tail: return ParseCall(Inst, PFS, true);
// Memory.
case lltok::kw_alloca: return ParseAlloc(Inst, PFS);
- case lltok::kw_load: return ParseLoad(Inst, PFS, false, false);
- case lltok::kw_store: return ParseStore(Inst, PFS, false, false);
- case lltok::kw_cmpxchg: return ParseCmpXchg(Inst, PFS, false);
- case lltok::kw_atomicrmw: return ParseAtomicRMW(Inst, PFS, false);
+ case lltok::kw_load: return ParseLoad(Inst, PFS, false);
+ case lltok::kw_store: return ParseStore(Inst, PFS, false);
+ case lltok::kw_cmpxchg: return ParseCmpXchg(Inst, PFS);
+ case lltok::kw_atomicrmw: return ParseAtomicRMW(Inst, PFS);
case lltok::kw_fence: return ParseFence(Inst, PFS);
- case lltok::kw_atomic: {
- bool isVolatile = EatIfPresent(lltok::kw_volatile);
- if (EatIfPresent(lltok::kw_load))
- return ParseLoad(Inst, PFS, true, isVolatile);
- else if (EatIfPresent(lltok::kw_store))
- return ParseStore(Inst, PFS, true, isVolatile);
- }
case lltok::kw_volatile:
+ // For compatibility; canonical location is after load
if (EatIfPresent(lltok::kw_load))
- return ParseLoad(Inst, PFS, false, true);
+ return ParseLoad(Inst, PFS, true);
else if (EatIfPresent(lltok::kw_store))
- return ParseStore(Inst, PFS, false, true);
- else if (EatIfPresent(lltok::kw_cmpxchg))
- return ParseCmpXchg(Inst, PFS, true);
- else if (EatIfPresent(lltok::kw_atomicrmw))
- return ParseAtomicRMW(Inst, PFS, true);
+ return ParseStore(Inst, PFS, true);
else
return TokError("expected 'load' or 'store'");
case lltok::kw_getelementptr: return ParseGetElementPtr(Inst, PFS);
@@ -3694,16 +3684,34 @@ int LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS) {
}
/// ParseLoad
-/// ::= 'volatile'? 'load' TypeAndValue (',' 'align' i32)?
-// ::= 'atomic' 'volatile'? 'load' TypeAndValue
-// 'singlethread'? AtomicOrdering (',' 'align' i32)?
+/// ::= 'load' 'volatile'? TypeAndValue (',' 'align' i32)?
+/// ::= 'load' 'atomic' 'volatile'? TypeAndValue
+/// 'singlethread'? AtomicOrdering (',' 'align' i32)?
+/// Compatibility:
+/// ::= 'volatile' 'load' TypeAndValue (',' 'align' i32)?
int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
- bool isAtomic, bool isVolatile) {
+ bool isVolatile) {
Value *Val; LocTy Loc;
unsigned Alignment = 0;
bool AteExtraComma = false;
+ bool isAtomic = false;
AtomicOrdering Ordering = NotAtomic;
SynchronizationScope Scope = CrossThread;
+
+ if (Lex.getKind() == lltok::kw_atomic) {
+ if (isVolatile)
+ return TokError("mixing atomic with old volatile placement");
+ isAtomic = true;
+ Lex.Lex();
+ }
+
+ if (Lex.getKind() == lltok::kw_volatile) {
+ if (isVolatile)
+ return TokError("duplicate volatile before and after store");
+ isVolatile = true;
+ Lex.Lex();
+ }
+
if (ParseTypeAndValue(Val, Loc, PFS) ||
ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
ParseOptionalCommaAlign(Alignment, AteExtraComma))
@@ -3722,16 +3730,35 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
}
/// ParseStore
-/// ::= 'volatile'? 'store' TypeAndValue ',' TypeAndValue (',' 'align' i32)?
-/// ::= 'atomic' 'volatile'? 'store' TypeAndValue ',' TypeAndValue
+/// ::= 'store' 'volatile'? TypeAndValue ',' TypeAndValue (',' 'align' i32)?
+/// ::= 'store' 'atomic' 'volatile'? TypeAndValue ',' TypeAndValue
/// 'singlethread'? AtomicOrdering (',' 'align' i32)?
+/// Compatibility:
+/// ::= 'volatile' 'store' TypeAndValue ',' TypeAndValue (',' 'align' i32)?
int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
- bool isAtomic, bool isVolatile) {
+ bool isVolatile) {
Value *Val, *Ptr; LocTy Loc, PtrLoc;
unsigned Alignment = 0;
bool AteExtraComma = false;
+ bool isAtomic = false;
AtomicOrdering Ordering = NotAtomic;
SynchronizationScope Scope = CrossThread;
+
+ if (Lex.getKind() == lltok::kw_atomic) {
+ if (isVolatile)
+ return TokError("mixing atomic with old volatile placement");
+ isAtomic = true;
+ Lex.Lex();
+ }
+
+ if (Lex.getKind() == lltok::kw_volatile) {
+ if (isVolatile)
+ return TokError("duplicate volatile before and after store");
+ isVolatile = true;
+ Lex.Lex();
+ }
+
if (ParseTypeAndValue(Val, Loc, PFS) ||
ParseToken(lltok::comma, "expected ',' after store operand") ||
ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
@@ -3755,14 +3782,18 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
}
/// ParseCmpXchg
-/// ::= 'volatile'? 'cmpxchg' TypeAndValue ',' TypeAndValue ',' TypeAndValue
-/// 'singlethread'? AtomicOrdering
-int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS,
- bool isVolatile) {
+/// ::= 'cmpxchg' 'volatile'? TypeAndValue ',' TypeAndValue ',' TypeAndValue
+/// 'singlethread'? AtomicOrdering
+int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
Value *Ptr, *Cmp, *New; LocTy PtrLoc, CmpLoc, NewLoc;
bool AteExtraComma = false;
AtomicOrdering Ordering = NotAtomic;
SynchronizationScope Scope = CrossThread;
+ bool isVolatile = false;
+
+ if (EatIfPresent(lltok::kw_volatile))
+ isVolatile = true;
+
if (ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
ParseToken(lltok::comma, "expected ',' after cmpxchg address") ||
ParseTypeAndValue(Cmp, CmpLoc, PFS) ||
@@ -3794,15 +3825,19 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS,
}
/// ParseAtomicRMW
-/// ::= 'volatile'? 'atomicrmw' BinOp TypeAndValue ',' TypeAndValue
-/// 'singlethread'? AtomicOrdering
-int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS,
- bool isVolatile) {
+/// ::= 'atomicrmw' 'volatile'? BinOp TypeAndValue ',' TypeAndValue
+/// 'singlethread'? AtomicOrdering
+int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
Value *Ptr, *Val; LocTy PtrLoc, ValLoc;
bool AteExtraComma = false;
AtomicOrdering Ordering = NotAtomic;
SynchronizationScope Scope = CrossThread;
+ bool isVolatile = false;
AtomicRMWInst::BinOp Operation;
+
+ if (EatIfPresent(lltok::kw_volatile))
+ isVolatile = true;
+
switch (Lex.getKind()) {
default: return TokError("expected binary operation in atomicrmw");
case lltok::kw_xchg: Operation = AtomicRMWInst::Xchg; break;
diff --git a/lib/AsmParser/LLParser.h b/lib/AsmParser/LLParser.h
index ef4d3dba9e..cbc3c23e86 100644
--- a/lib/AsmParser/LLParser.h
+++ b/lib/AsmParser/LLParser.h
@@ -363,12 +363,10 @@ namespace llvm {
bool ParseLandingPad(Instruction *&I, PerFunctionState &PFS);
bool ParseCall(Instruction *&I, PerFunctionState &PFS, bool isTail);
int ParseAlloc(Instruction *&I, PerFunctionState &PFS);
- int ParseLoad(Instruction *&I, PerFunctionState &PFS,
- bool isAtomic, bool isVolatile);
- int ParseStore(Instruction *&I, PerFunctionState &PFS,
- bool isAtomic, bool isVolatile);
- int ParseCmpXchg(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
- int ParseAtomicRMW(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
+ int ParseLoad(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
+ int ParseStore(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
+ int ParseCmpXchg(Instruction *&I, PerFunctionState &PFS);
+ int ParseAtomicRMW(Instruction *&I, PerFunctionState &PFS);
int ParseFence(Instruction *&I, PerFunctionState &PFS);
int ParseGetElementPtr(Instruction *&I, PerFunctionState &PFS);
int ParseExtractValue(Instruction *&I, PerFunctionState &PFS);
diff --git a/lib/VMCore/AsmWriter.cpp b/lib/VMCore/AsmWriter.cpp
index d166604113..1fc94ba7ca 100644
--- a/lib/VMCore/AsmWriter.cpp
+++ b/lib/VMCore/AsmWriter.cpp
@@ -1658,16 +1658,6 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
else
Out << '%' << SlotNum << " = ";
}
-
- // If this is an atomic load or store, print out the atomic marker.
- if ((isa<LoadInst>(I) && cast<LoadInst>(I).isAtomic()) ||
- (isa<StoreInst>(I) && cast<StoreInst>(I).isAtomic()))
- Out << "atomic ";
-
- // If this is a volatile load or store, print out the volatile marker.
- if ((isa<LoadInst>(I) && cast<LoadInst>(I).isVolatile()) ||
- (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile()))
- Out << "volatile ";
if (isa<CallInst>(I) && cast<CallInst>(I).isTailCall())
Out << "tail ";
@@ -1675,6 +1665,18 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
// Print out the opcode...
Out << I.getOpcodeName();
+ // If this is an atomic load or store, print out the atomic marker.
+ if ((isa<LoadInst>(I) && cast<LoadInst>(I).isAtomic()) ||
+ (isa<StoreInst>(I) && cast<StoreInst>(I).isAtomic()))
+ Out << " atomic";
+
+ // If this is a volatile operation, print out the volatile marker.
+ if ((isa<LoadInst>(I) && cast<LoadInst>(I).isVolatile()) ||
+ (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile()) ||
+ (isa<AtomicCmpXchgInst>(I) && cast<AtomicCmpXchgInst>(I).isVolatile()) ||
+ (isa<AtomicRMWInst>(I) && cast<AtomicRMWInst>(I).isVolatile()))
+ Out << " volatile";
+
// Print out optimization information.
WriteOptimizationInfo(Out, &I);
diff --git a/test/Assembler/atomic.ll b/test/Assembler/atomic.ll
new file mode 100644
index 0000000000..fa6f1f40e0
--- /dev/null
+++ b/test/Assembler/atomic.ll
@@ -0,0 +1,26 @@
+; RUN: opt -S < %s | FileCheck %s
+; Basic smoke test for atomic operations.
+
+define void @f(i32* %x) {
+ ; CHECK: load atomic i32* %x unordered, align 4
+ load atomic i32* %x unordered, align 4
+ ; CHECK: load atomic volatile i32* %x singlethread acquire, align 4
+ load atomic volatile i32* %x singlethread acquire, align 4
+ ; CHECK: store atomic i32 3, i32* %x release, align 4
+ store atomic i32 3, i32* %x release, align 4
+ ; CHECK: store atomic volatile i32 3, i32* %x singlethread monotonic, align 4
+ store atomic volatile i32 3, i32* %x singlethread monotonic, align 4
+ ; CHECK: cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic
+ cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic
+ ; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel
+ cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel
+ ; CHECK: atomicrmw add i32* %x, i32 10 seq_cst
+ atomicrmw add i32* %x, i32 10 seq_cst
+ ; CHECK: atomicrmw volatile xchg i32* %x, i32 10 monotonic
+ atomicrmw volatile xchg i32* %x, i32 10 monotonic
+ ; CHECK: fence singlethread release
+ fence singlethread release
+ ; CHECK: fence seq_cst
+ fence seq_cst
+ ret void
+}
diff --git a/test/Transforms/DeadArgElim/deadexternal.ll b/test/Transforms/DeadArgElim/deadexternal.ll
index 8409261313..b2d63ec772 100644
--- a/test/Transforms/DeadArgElim/deadexternal.ll
+++ b/test/Transforms/DeadArgElim/deadexternal.ll
@@ -31,7 +31,7 @@ define void @h() {
entry:
%i = alloca i32, align 4
volatile store i32 10, i32* %i, align 4
-; CHECK: %tmp = volatile load i32* %i, align 4
+; CHECK: %tmp = load volatile i32* %i, align 4
; CHECK-next: call void @f(i32 undef)
%tmp = volatile load i32* %i, align 4
call void @f(i32 %tmp)
diff --git a/test/Transforms/DeadStoreElimination/simple.ll b/test/Transforms/DeadStoreElimination/simple.ll
index 5f143fcd1e..ec2f15737a 100644
--- a/test/Transforms/DeadStoreElimination/simple.ll
+++ b/test/Transforms/DeadStoreElimination/simple.ll
@@ -42,20 +42,20 @@ define i32 @test3(i32* %g_addr) nounwind {
define void @test4(i32* %Q) {
%a = load i32* %Q
- volatile store i32 %a, i32* %Q
+ store volatile i32 %a, i32* %Q
ret void
; CHECK: @test4
; CHECK-NEXT: load i32
-; CHECK-NEXT: volatile store
+; CHECK-NEXT: store volatile
; CHECK-NEXT: ret void
}
define void @test5(i32* %Q) {
- %a = volatile load i32* %Q
+ %a = load volatile i32* %Q
store i32 %a, i32* %Q
ret void
; CHECK: @test5
-; CHECK-NEXT: volatile load
+; CHECK-NEXT: load volatile
; CHECK-NEXT: ret void
}
diff --git a/test/Transforms/EarlyCSE/basic.ll b/test/Transforms/EarlyCSE/basic.ll
index e3c75f97dc..57b1697ff4 100644
--- a/test/Transforms/EarlyCSE/basic.ll
+++ b/test/Transforms/EarlyCSE/basic.ll
@@ -13,21 +13,21 @@ define void @test1(i8 %V, i32 *%P) {
volatile store i32 %C, i32* %P
volatile store i32 %D, i32* %P
; CHECK-NEXT: %C = zext i8 %V to i32
- ; CHECK-NEXT: volatile store i32 %C
- ; CHECK-NEXT: volatile store i32 %C
+ ; CHECK-NEXT: store volatile i32 %C
+ ; CHECK-NEXT: store volatile i32 %C
%E = add i32 %C, %C
%F = add i32 %C, %C
volatile store i32 %E, i32* %P
volatile store i32 %F, i32* %P
; CHECK-NEXT: %E = add i32 %C, %C
- ; CHECK-NEXT: volatile store i32 %E
- ; CHECK-NEXT: volatile store i32 %E
+ ; CHECK-NEXT: store volatile i32 %E
+ ; CHECK-NEXT: store volatile i32 %E
%G = add nuw i32 %C, %C ;; not a CSE with E
volatile store i32 %G, i32* %P
; CHECK-NEXT: %G = add nuw i32 %C, %C
- ; CHECK-NEXT: volatile store i32 %G
+ ; CHECK-NEXT: store volatile i32 %G
ret void
}
diff --git a/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll b/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
index 0c817005c2..a6803abc5d 100644
--- a/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
+++ b/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -globalopt -S | grep {volatile load}
+; RUN: opt < %s -globalopt -S | grep {load volatile}
@t0.1441 = internal global double 0x3FD5555555555555, align 8 ; <double*> [#uses=1]
define double @foo() nounwind {
diff --git a/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll b/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll
index 626564da93..6847f5ed05 100644
--- a/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll
+++ b/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -instcombine -S | grep {volatile store}
+; RUN: opt < %s -instcombine -S | grep {store volatile}
define void @test() {
%votf = alloca <4 x float> ; <<4 x float>*> [#uses=1]
diff --git a/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll b/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll
index f2cc7254a3..a24f3071c9 100644
--- a/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll
+++ b/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -instcombine -S | grep {volatile load} | count 2
+; RUN: opt < %s -instcombine -S | grep {load volatile} | count 2
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin8"
@g_1 = internal global i32 0 ; <i32*> [#uses=3]
diff --git a/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll b/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll
index 176162d386..5fb11ffb32 100644
--- a/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll
+++ b/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -instcombine -S | grep {volatile load} | count 2
+; RUN: opt < %s -instcombine -S | grep {load volatile} | count 2
; PR2262
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin8"
diff --git a/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll b/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll
index ccfb118276..81044083c6 100644
--- a/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll
+++ b/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -instcombine -S | grep {volatile load} | count 2
+; RUN: opt < %s -instcombine -S | grep {load volatile} | count 2
; PR2496
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin8"
diff --git a/test/Transforms/InstCombine/extractvalue.ll b/test/Transforms/InstCombine/extractvalue.ll
index 64edc18d45..cf36b8f237 100644
--- a/test/Transforms/InstCombine/extractvalue.ll
+++ b/test/Transforms/InstCombine/extractvalue.ll
@@ -96,7 +96,7 @@ define i32 @nogep-multiuse({i32, i32}* %pair) {
}
; CHECK: define i32 @nogep-volatile
-; CHECK-NEXT: volatile load {{.*}} %pair
+; CHECK-NEXT: load volatile {{.*}} %pair
; CHECK-NEXT: extractvalue
; CHECK-NEXT: ret
define i32 @nogep-volatile({i32, i32}* %pair) {
diff --git a/test/Transforms/InstCombine/intrinsics.ll b/test/Transforms/InstCombine/intrinsics.ll
index 0d84ae4743..f033e51036 100644
--- a/test/Transforms/InstCombine/intrinsics.ll
+++ b/test/Transforms/InstCombine/intrinsics.ll
@@ -152,9 +152,9 @@ entry:
ret void
; CHECK: @powi
; CHECK: %A = fdiv double 1.0{{.*}}, %V
-; CHECK: volatile store double %A,
-; CHECK: volatile store double 1.0
-; CHECK: volatile store double %V
+; CHECK: store volatile double %A,
+; CHECK: store volatile double 1.0
+; CHECK: store volatile double %V
}
define i32 @cttz(i32 %a) {
@@ -194,11 +194,11 @@ entry:
; CHECK: @cmp.simplify
; CHECK-NEXT: entry:
; CHECK-NEXT: %lz.cmp = icmp eq i32 %a, 0
-; CHECK-NEXT: volatile store i1 %lz.cmp, i1* %c
+; CHECK-NEXT: store volatile i1 %lz.cmp, i1* %c
; CHECK-NEXT: %tz.cmp = icmp ne i32 %a, 0
-; CHECK-NEXT: volatile store i1 %tz.cmp, i1* %c
+; CHECK-NEXT: store volatile i1 %tz.cmp, i1* %c
; CHECK-NEXT: %pop.cmp = icmp eq i32 %b, 0
-; CHECK-NEXT: volatile store i1 %pop.cmp, i1* %c
+; CHECK-NEXT: store volatile i1 %pop.cmp, i1* %c
}
diff --git a/test/Transforms/InstCombine/volatile_store.ll b/test/Transforms/InstCombine/volatile_store.ll
index 5316bd772e..0518e5aa02 100644
--- a/test/Transforms/InstCombine/volatile_store.ll
+++ b/test/Transforms/InstCombine/volatile_store.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -instcombine -S | grep {volatile store}
-; RUN: opt < %s -instcombine -S | grep {volatile load}
+; RUN: opt < %s -instcombine -S | grep {store volatile}
+; RUN: opt < %s -instcombine -S | grep {load volatile}
@x = weak global i32 0 ; <i32*> [#uses=2]
diff --git a/test/Transforms/JumpThreading/no-irreducible-loops.ll b/test/Transforms/JumpThreading/no-irreducible-loops.ll
index 97276b039a..7c7fe3929a 100644
--- a/test/Transforms/JumpThreading/no-irreducible-loops.ll
+++ b/test/Transforms/JumpThreading/no-irreducible-loops.ll
@@ -1,5 +1,5 @@
; RUN: opt < %s -jump-threading -loop-rotate -instcombine -indvars -loop-unroll -simplifycfg -S -verify-dom-info -verify-loop-info > %t
-; RUN: grep {volatile store} %t | count 3
+; RUN: grep {store volatile} %t | count 3
; RUN: not grep {br label} %t
; Jump threading should not prevent this loop from being unrolled.
diff --git a/test/Transforms/LICM/2007-05-22-VolatileSink.ll b/test/Transforms/LICM/2007-05-22-VolatileSink.ll
index c12e13becc..17383c2ebb 100644
--- a/test/Transforms/LICM/2007-05-22-VolatileSink.ll
+++ b/test/Transforms/LICM/2007-05-22-VolatileSink.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -licm -S | grep {volatile store}
+; RUN: opt < %s -licm -S | grep {store volatile}
; PR1435
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
target triple = "i686-apple-darwin8"
diff --git a/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll b/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll
index 5774f587f1..fd114f4ccc 100644
--- a/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll
+++ b/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll
@@ -15,7 +15,7 @@ for.body4.lr.ph:
br label %for.body4
; CHECK: for.body4:
-; CHECK: volatile load i16* @g_39
+; CHECK: load volatile i16* @g_39
for.body4:
%l_612.11 = phi i32* [ undef, %for.body4.lr.ph ], [ %call19, %for.body4 ]
diff --git a/test/Transforms/LICM/scalar_promote.ll b/test/Transforms/LICM/scalar_promote.ll
index d8acdc1a3a..9aefc4f87e 100644
--- a/test/Transforms/LICM/scalar_promote.ll
+++ b/test/Transforms/LICM/scalar_promote.ll
@@ -65,7 +65,7 @@ Loop:
br i1 true, label %Out, label %Loop
; CHECK: Loop:
-; CHECK-NEXT: volatile load
+; CHECK-NEXT: load volatile
Out: ; preds = %Loop
ret void
diff --git a/test/Transforms/ObjCARC/contract-storestrong.ll b/test/Transforms/ObjCARC/contract-storestrong.ll
index 50ed260eb0..25c93f411c 100644
--- a/test/Transforms/ObjCARC/contract-storestrong.ll
+++ b/test/Transforms/ObjCARC/contract-storestrong.ll
@@ -25,7 +25,7 @@ entry:
; CHECK: define void @test1(i8* %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %0 = tail call i8* @objc_retain(i8* %p) nounwind
-; CHECK-NEXT: %tmp = volatile load i8** @x, align 8
+; CHECK-NEXT: %tmp = load volatile i8** @x, align 8
; CHECK-NEXT: store i8* %0, i8** @x, align 8
; CHECK-NEXT: tail call void @objc_release(i8* %tmp) nounwind
; CHECK-NEXT: ret void
@@ -45,7 +45,7 @@ entry:
; CHECK-NEXT: entry:
; CHECK-NEXT: %0 = tail call i8* @objc_retain(i8* %p) nounwind
; CHECK-NEXT: %tmp = load i8** @x, align 8
-; CHECK-NEXT: volatile store i8* %0, i8** @x, align 8
+; CHECK-NEXT: store volatile i8* %0, i8** @x, align 8
; CHECK-NEXT: tail call void @objc_release(i8* %tmp) nounwind
; CHECK-NEXT: ret void
; CHECK-NEXT: }
diff --git a/test/Transforms/ScalarRepl/volatile.ll b/test/Transforms/ScalarRepl/volatile.ll
index 3ff322e065..ab276b043e 100644
--- a/test/Transforms/ScalarRepl/volatile.ll
+++ b/test/Transforms/ScalarRepl/volatile.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -scalarrepl -S | grep {volatile load}
-; RUN: opt < %s -scalarrepl -S | grep {volatile store}
+; RUN: opt < %s -scalarrepl -S | grep {load volatile}
+; RUN: opt < %s -scalarrepl -S | grep {store volatile}
define i32 @voltest(i32 %T) {
%A = alloca {i32, i32}
diff --git a/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll b/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
index 7bca5f5afa..ebf4f171a3 100644
--- a/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
+++ b/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
@@ -11,14 +11,14 @@ entry:
br i1 %0, label %bb, label %return
bb: ; preds = %entry
- %1 = volatile load i32* null
+ %1 = load volatile i32* null
unreachable
br label %return
return: ; preds = %entry
ret void
; CHECK: @test1
-; CHECK: volatile load
+; CHECK: load volatile
}
; rdar://7958343
@@ -35,10 +35,10 @@ entry:
; PR7369
define void @test3() nounwind {
entry:
- volatile store i32 4, i32* null
+ store volatile i32 4, i32* null
ret void
; CHECK: @test3
-; CHECK: volatile store i32 4, i32* null
+; CHECK: store volatile i32 4, i32* null
; CHECK: ret
}
diff --git a/test/Transforms/SimplifyLibCalls/memcmp.ll b/test/Transforms/SimplifyLibCalls/memcmp.ll
index ee99501bc0..6ca4dc97a1 100644
--- a/test/Transforms/SimplifyLibCalls/memcmp.ll
+++ b/test/Transforms/SimplifyLibCalls/memcmp.ll
@@ -10,26 +10,26 @@ declare i32 @memcmp(i8*, i8*, i32)
define void @test(i8* %P, i8* %Q, i32 %N, i32* %IP, i1* %BP) {
%A = call i32 @memcmp( i8* %P, i8* %P, i32 %N ) ; <i32> [#uses=1]
; CHECK-NOT: call {{.*}} memcmp
-; CHECK: volatile store
- volatile store i32 %A, i32* %IP
+; CHECK: store volatile
+ store volatile i32 %A, i32* %IP
%B = call i32 @memcmp( i8* %P, i8* %Q, i32 0 ) ; <i32> [#uses=1]
; CHECK-NOT: call {{.*}} memcmp
-; CHECK: volatile store
- volatile store i32 %B, i32* %IP
+; CHECK: store volatile
+ store volatile i32 %B, i32* %IP
%C = call i32 @memcmp( i8* %P, i8* %Q, i32 1 ) ; <i32> [#uses=1]
; CHECK: load
; CHECK: zext
; CHECK: load
; CHECK: zext
; CHECK: sub
-; CHECK: volatile store
- volatile store i32 %C, i32* %IP
- %F = call i32 @memcmp(i8* getelementptr ([4 x i8]* @hel, i32 0, i32 0),
- i8* getelementptr ([8 x i8]* @hello_u, i32 0, i32 0),
- i32 3)
+; CHECK: store volatile
+ store volatile i32 %C, i32* %IP
+ %F = call i32 @memcmp(i8* getelementptr ([4 x i8]* @hel, i32 0, i32 0),
+ i8* getelementptr ([8 x i8]* @hello_u, i32 0, i32 0),
+ i32 3)
; CHECK-NOT: call {{.*}} memcmp
-; CHECK: volatile store
- volatile store i32 %F, i32* %IP
+; CHECK: store volatile
+ store volatile i32 %F, i32* %IP
ret void
}