Diffstat (limited to 'test')
-rw-r--r--  test/Assembler/atomic.ll                                  |  10
-rw-r--r--  test/Bitcode/cmpxchg-upgrade.ll                           |  23
-rw-r--r--  test/Bitcode/cmpxchg-upgrade.ll.bc                        | bin 0 -> 360 bytes
-rw-r--r--  test/Bitcode/memInstructions.3.2.ll                       |  80
-rw-r--r--  test/CodeGen/AArch64/atomic-ops.ll                        |   8
-rw-r--r--  test/CodeGen/ARM/atomic-64bit.ll                          |   2
-rw-r--r--  test/CodeGen/ARM/atomic-cmp.ll                            |   2
-rw-r--r--  test/CodeGen/ARM/atomic-ops-v8.ll                         |   8
-rw-r--r--  test/CodeGen/Mips/atomic.ll                               |   6
-rw-r--r--  test/CodeGen/Mips/atomicops.ll                            |   2
-rw-r--r--  test/CodeGen/PowerPC/Atomics-32.ll                        |  32
-rw-r--r--  test/CodeGen/PowerPC/Atomics-64.ll                        |  32
-rw-r--r--  test/CodeGen/PowerPC/atomic-1.ll                          |   2
-rw-r--r--  test/CodeGen/PowerPC/atomic-2.ll                          |   2
-rw-r--r--  test/CodeGen/SPARC/atomics.ll                             |   4
-rw-r--r--  test/CodeGen/SystemZ/cmpxchg-01.ll                        |   4
-rw-r--r--  test/CodeGen/SystemZ/cmpxchg-02.ll                        |   4
-rw-r--r--  test/CodeGen/SystemZ/cmpxchg-03.ll                        |  24
-rw-r--r--  test/CodeGen/SystemZ/cmpxchg-04.ll                        |  18
-rw-r--r--  test/CodeGen/X86/2010-10-08-cmpxchg8b.ll                  |   2
-rw-r--r--  test/CodeGen/X86/Atomics-64.ll                            |  40
-rw-r--r--  test/CodeGen/X86/atomic16.ll                              |   2
-rw-r--r--  test/CodeGen/X86/atomic32.ll                              |   2
-rw-r--r--  test/CodeGen/X86/atomic64.ll                              |   2
-rw-r--r--  test/CodeGen/X86/atomic6432.ll                            |   2
-rw-r--r--  test/CodeGen/X86/atomic8.ll                               |   2
-rw-r--r--  test/CodeGen/X86/atomic_op.ll                             |   6
-rw-r--r--  test/CodeGen/X86/cmpxchg16b.ll                            |   2
-rw-r--r--  test/CodeGen/X86/coalescer-remat.ll                       |   2
-rw-r--r--  test/CodeGen/X86/nocx16.ll                                |   2
-rw-r--r--  test/Instrumentation/AddressSanitizer/test64.ll           |   2
-rw-r--r--  test/Instrumentation/MemorySanitizer/atomics.ll           |  10
-rw-r--r--  test/Instrumentation/ThreadSanitizer/atomic.ll            |  50
-rw-r--r--  test/Transforms/LowerAtomic/atomic-swap.ll                |   2
-rw-r--r--  test/Transforms/SimplifyCFG/trapping-load-unreachable.ll  |   2
35 files changed, 209 insertions, 184 deletions
diff --git a/test/Assembler/atomic.ll b/test/Assembler/atomic.ll
index b245cdea75..a2ae58e296 100644
--- a/test/Assembler/atomic.ll
+++ b/test/Assembler/atomic.ll
@@ -10,10 +10,12 @@ define void @f(i32* %x) {
store atomic i32 3, i32* %x release, align 4
; CHECK: store atomic volatile i32 3, i32* %x singlethread monotonic, align 4
store atomic volatile i32 3, i32* %x singlethread monotonic, align 4
- ; CHECK: cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic
- cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic
- ; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel
- cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel
+ ; CHECK: cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic monotonic
+ cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic monotonic
+ ; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
+ cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
+ ; CHECK: cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
+ cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
; CHECK: atomicrmw add i32* %x, i32 10 seq_cst
atomicrmw add i32* %x, i32 10 seq_cst
; CHECK: atomicrmw volatile xchg i32* %x, i32 10 monotonic
diff --git a/test/Bitcode/cmpxchg-upgrade.ll b/test/Bitcode/cmpxchg-upgrade.ll
new file mode 100644
index 0000000000..d36ac1c179
--- /dev/null
+++ b/test/Bitcode/cmpxchg-upgrade.ll
@@ -0,0 +1,23 @@
+; RUN: llvm-dis < %s.bc | FileCheck %s
+
+; cmpxchg-upgrade.ll.bc was produced by running a version of llvm-as from just
+; before the IR change on this file.
+
+define void @test(i32* %addr) {
+ cmpxchg i32* %addr, i32 42, i32 0 monotonic
+; CHECK: cmpxchg i32* %addr, i32 42, i32 0 monotonic monotonic
+
+ cmpxchg i32* %addr, i32 42, i32 0 acquire
+; CHECK: cmpxchg i32* %addr, i32 42, i32 0 acquire acquire
+
+ cmpxchg i32* %addr, i32 42, i32 0 release
+; CHECK: cmpxchg i32* %addr, i32 42, i32 0 release monotonic
+
+ cmpxchg i32* %addr, i32 42, i32 0 acq_rel
+; CHECK: cmpxchg i32* %addr, i32 42, i32 0 acq_rel acquire
+
+ cmpxchg i32* %addr, i32 42, i32 0 seq_cst
+; CHECK: cmpxchg i32* %addr, i32 42, i32 0 seq_cst seq_cst
+
+ ret void
+}
\ No newline at end of file
diff --git a/test/Bitcode/cmpxchg-upgrade.ll.bc b/test/Bitcode/cmpxchg-upgrade.ll.bc
new file mode 100644
index 0000000000..922f2eb84e
--- /dev/null
+++ b/test/Bitcode/cmpxchg-upgrade.ll.bc
Binary files differ
diff --git a/test/Bitcode/memInstructions.3.2.ll b/test/Bitcode/memInstructions.3.2.ll
index 868e4b5f4d..21c3deb8a5 100644
--- a/test/Bitcode/memInstructions.3.2.ll
+++ b/test/Bitcode/memInstructions.3.2.ll
@@ -223,69 +223,69 @@ define void @cmpxchg(i32* %ptr,i32 %cmp,i32 %new){
entry:
;cmpxchg [volatile] <ty>* <pointer>, <ty> <cmp>, <ty> <new> [singlethread] <ordering>
-; CHECK: %res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic
- %res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic
+; CHECK: %res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
+ %res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
-; CHECK-NEXT: %res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic
- %res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic
+; CHECK-NEXT: %res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
+ %res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
-; CHECK-NEXT: %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic
- %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic
+; CHECK-NEXT: %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
+ %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
-; CHECK-NEXT: %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic
- %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic
+; CHECK-NEXT: %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
+ %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
-; CHECK-NEXT: %res5 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire
- %res5 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire
+; CHECK-NEXT: %res5 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire acquire
+ %res5 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire acquire
-; CHECK-NEXT: %res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire
- %res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire
+; CHECK-NEXT: %res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire acquire
+ %res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire acquire
-; CHECK-NEXT: %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire
- %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire
+; CHECK-NEXT: %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
+ %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
-; CHECK-NEXT: %res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire
- %res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire
+; CHECK-NEXT: %res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
+ %res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
-; CHECK-NEXT: %res9 = cmpxchg i32* %ptr, i32 %cmp, i32 %new release
- %res9 = cmpxchg i32* %ptr, i32 %cmp, i32 %new release
+; CHECK-NEXT: %res9 = cmpxchg i32* %ptr, i32 %cmp, i32 %new release monotonic
+ %res9 = cmpxchg i32* %ptr, i32 %cmp, i32 %new release monotonic
-; CHECK-NEXT: %res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release
- %res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release
+; CHECK-NEXT: %res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release monotonic
+ %res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release monotonic
-; CHECK-NEXT: %res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release
- %res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release
+; CHECK-NEXT: %res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
+ %res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
-; CHECK-NEXT: %res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release
- %res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release
+; CHECK-NEXT: %res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
+ %res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
-; CHECK-NEXT: %res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel
- %res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel
+; CHECK-NEXT: %res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
+ %res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
-; CHECK-NEXT: %res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel
- %res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel
+; CHECK-NEXT: %res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
+ %res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
-; CHECK-NEXT: %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel
- %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel
+; CHECK-NEXT: %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
+ %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
-; CHECK-NEXT: %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel
- %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel
+; CHECK-NEXT: %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
+ %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
-; CHECK-NEXT: %res17 = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst
- %res17 = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst
+; CHECK-NEXT: %res17 = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+ %res17 = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
-; CHECK-NEXT: %res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst
- %res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst
+; CHECK-NEXT: %res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+ %res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
-; CHECK-NEXT: %res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst
- %res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst
+; CHECK-NEXT: %res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
+ %res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
-; CHECK-NEXT: %res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst
- %res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst
+; CHECK-NEXT: %res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
+ %res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
ret void
}
diff --git a/test/CodeGen/AArch64/atomic-ops.ll b/test/CodeGen/AArch64/atomic-ops.ll
index 5857faf80a..5fe29364ee 100644
--- a/test/CodeGen/AArch64/atomic-ops.ll
+++ b/test/CodeGen/AArch64/atomic-ops.ll
@@ -897,7 +897,7 @@ define i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i8:
- %old = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire
+ %old = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
@@ -920,7 +920,7 @@ define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i16:
- %old = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst
+ %old = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
@@ -943,7 +943,7 @@ define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i32:
- %old = cmpxchg i32* @var32, i32 %wanted, i32 %new release
+ %old = cmpxchg i32* @var32, i32 %wanted, i32 %new release monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
@@ -966,7 +966,7 @@ define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
define i64 @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i64:
- %old = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic
+ %old = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
diff --git a/test/CodeGen/ARM/atomic-64bit.ll b/test/CodeGen/ARM/atomic-64bit.ll
index 0477d4f401..7cf45ebde7 100644
--- a/test/CodeGen/ARM/atomic-64bit.ll
+++ b/test/CodeGen/ARM/atomic-64bit.ll
@@ -171,7 +171,7 @@ define i64 @test7(i64* %ptr, i64 %val1, i64 %val2) {
; CHECK-THUMB: bne
; CHECK-THUMB: dmb {{ish$}}
- %r = cmpxchg i64* %ptr, i64 %val1, i64 %val2 seq_cst
+ %r = cmpxchg i64* %ptr, i64 %val1, i64 %val2 seq_cst seq_cst
ret i64 %r
}
diff --git a/test/CodeGen/ARM/atomic-cmp.ll b/test/CodeGen/ARM/atomic-cmp.ll
index 51ada693d0..a4738077b1 100644
--- a/test/CodeGen/ARM/atomic-cmp.ll
+++ b/test/CodeGen/ARM/atomic-cmp.ll
@@ -10,6 +10,6 @@ define i8 @t(i8* %a, i8 %b, i8 %c) nounwind {
; T2-LABEL: t:
; T2: ldrexb
; T2: strexb
- %tmp0 = cmpxchg i8* %a, i8 %b, i8 %c monotonic
+ %tmp0 = cmpxchg i8* %a, i8 %b, i8 %c monotonic monotonic
ret i8 %tmp0
}
diff --git a/test/CodeGen/ARM/atomic-ops-v8.ll b/test/CodeGen/ARM/atomic-ops-v8.ll
index 3f93929fd1..87e76a48c5 100644
--- a/test/CodeGen/ARM/atomic-ops-v8.ll
+++ b/test/CodeGen/ARM/atomic-ops-v8.ll
@@ -987,7 +987,7 @@ define i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i8:
- %old = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire
+ %old = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire acquire
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
@@ -1013,7 +1013,7 @@ define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i16:
- %old = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst
+ %old = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst seq_cst
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
@@ -1039,7 +1039,7 @@ define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i32:
- %old = cmpxchg i32* @var32, i32 %wanted, i32 %new release
+ %old = cmpxchg i32* @var32, i32 %wanted, i32 %new release monotonic
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
@@ -1065,7 +1065,7 @@ define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
define i64 @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i64:
- %old = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic
+ %old = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic monotonic
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
diff --git a/test/CodeGen/Mips/atomic.ll b/test/CodeGen/Mips/atomic.ll
index 0e60fe1fbf..77d7bf3154 100644
--- a/test/CodeGen/Mips/atomic.ll
+++ b/test/CodeGen/Mips/atomic.ll
@@ -77,7 +77,7 @@ entry:
%newval.addr = alloca i32, align 4
store i32 %newval, i32* %newval.addr, align 4
%tmp = load i32* %newval.addr, align 4
- %0 = cmpxchg i32* @x, i32 %oldval, i32 %tmp monotonic
+ %0 = cmpxchg i32* @x, i32 %oldval, i32 %tmp monotonic monotonic
ret i32 %0
; CHECK-EL-LABEL: AtomicCmpSwap32:
@@ -333,7 +333,7 @@ entry:
define signext i8 @AtomicCmpSwap8(i8 signext %oldval, i8 signext %newval) nounwind {
entry:
- %0 = cmpxchg i8* @y, i8 %oldval, i8 %newval monotonic
+ %0 = cmpxchg i8* @y, i8 %oldval, i8 %newval monotonic monotonic
ret i8 %0
; CHECK-EL-LABEL: AtomicCmpSwap8:
@@ -429,7 +429,7 @@ entry:
define i32 @zeroreg() nounwind {
entry:
- %0 = cmpxchg i32* @a, i32 1, i32 0 seq_cst
+ %0 = cmpxchg i32* @a, i32 1, i32 0 seq_cst seq_cst
%1 = icmp eq i32 %0, 1
%conv = zext i1 %1 to i32
ret i32 %conv
diff --git a/test/CodeGen/Mips/atomicops.ll b/test/CodeGen/Mips/atomicops.ll
index 0f0f01afc1..dc07c63741 100644
--- a/test/CodeGen/Mips/atomicops.ll
+++ b/test/CodeGen/Mips/atomicops.ll
@@ -20,7 +20,7 @@ entry:
%add.i = add nsw i32 %0, 2
%1 = load volatile i32* %x, align 4
%call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %add.i, i32 %1) nounwind
- %2 = cmpxchg i32* %x, i32 1, i32 2 seq_cst
+ %2 = cmpxchg i32* %x, i32 1, i32 2 seq_cst seq_cst
%3 = load volatile i32* %x, align 4
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %2, i32 %3) nounwind
%4 = atomicrmw xchg i32* %x, i32 1 seq_cst
diff --git a/test/CodeGen/PowerPC/Atomics-32.ll b/test/CodeGen/PowerPC/Atomics-32.ll
index 64f149541b..b5c03e2b20 100644
--- a/test/CodeGen/PowerPC/Atomics-32.ll
+++ b/test/CodeGen/PowerPC/Atomics-32.ll
@@ -529,63 +529,63 @@ define void @test_compare_and_swap() nounwind {
entry:
%0 = load i8* @uc, align 1
%1 = load i8* @sc, align 1
- %2 = cmpxchg i8* @sc, i8 %0, i8 %1 monotonic
+ %2 = cmpxchg i8* @sc, i8 %0, i8 %1 monotonic monotonic
store i8 %2, i8* @sc, align 1
%3 = load i8* @uc, align 1
%4 = load i8* @sc, align 1
- %5 = cmpxchg i8* @uc, i8 %3, i8 %4 monotonic
+ %5 = cmpxchg i8* @uc, i8 %3, i8 %4 monotonic monotonic
store i8 %5, i8* @uc, align 1
%6 = load i8* @uc, align 1
%7 = zext i8 %6 to i16
%8 = load i8* @sc, align 1
%9 = sext i8 %8 to i16
%10 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %11 = cmpxchg i16* %10, i16 %7, i16 %9 monotonic
+ %11 = cmpxchg i16* %10, i16 %7, i16 %9 monotonic monotonic
store i16 %11, i16* @ss, align 2
%12 = load i8* @uc, align 1
%13 = zext i8 %12 to i16
%14 = load i8* @sc, align 1
%15 = sext i8 %14 to i16
%16 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %17 = cmpxchg i16* %16, i16 %13, i16 %15 monotonic
+ %17 = cmpxchg i16* %16, i16 %13, i16 %15 monotonic monotonic
store i16 %17, i16* @us, align 2
%18 = load i8* @uc, align 1
%19 = zext i8 %18 to i32
%20 = load i8* @sc, align 1
%21 = sext i8 %20 to i32
%22 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %23 = cmpxchg i32* %22, i32 %19, i32 %21 monotonic
+ %23 = cmpxchg i32* %22, i32 %19, i32 %21 monotonic monotonic
store i32 %23, i32* @si, align 4
%24 = load i8* @uc, align 1
%25 = zext i8 %24 to i32
%26 = load i8* @sc, align 1
%27 = sext i8 %26 to i32
%28 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %29 = cmpxchg i32* %28, i32 %25, i32 %27 monotonic
+ %29 = cmpxchg i32* %28, i32 %25, i32 %27 monotonic monotonic
store i32 %29, i32* @ui, align 4
%30 = load i8* @uc, align 1
%31 = zext i8 %30 to i32
%32 = load i8* @sc, align 1
%33 = sext i8 %32 to i32
%34 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
- %35 = cmpxchg i32* %34, i32 %31, i32 %33 monotonic
+ %35 = cmpxchg i32* %34, i32 %31, i32 %33 monotonic monotonic
store i32 %35, i32* @sl, align 4
%36 = load i8* @uc, align 1
%37 = zext i8 %36 to i32
%38 = load i8* @sc, align 1
%39 = sext i8 %38 to i32
%40 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
- %41 = cmpxchg i32* %40, i32 %37, i32 %39 monotonic
+ %41 = cmpxchg i32* %40, i32 %37, i32 %39 monotonic monotonic
store i32 %41, i32* @ul, align 4
%42 = load i8* @uc, align 1
%43 = load i8* @sc, align 1
- %44 = cmpxchg i8* @sc, i8 %42, i8 %43 monotonic
+ %44 = cmpxchg i8* @sc, i8 %42, i8 %43 monotonic monotonic
%45 = icmp eq i8 %44, %42
%46 = zext i1 %45 to i32
store i32 %46, i32* @ui, align 4
%47 = load i8* @uc, align 1
%48 = load i8* @sc, align 1
- %49 = cmpxchg i8* @uc, i8 %47, i8 %48 monotonic
+ %49 = cmpxchg i8* @uc, i8 %47, i8 %48 monotonic monotonic
%50 = icmp eq i8 %49, %47
%51 = zext i1 %50 to i32
store i32 %51, i32* @ui, align 4
@@ -594,7 +594,7 @@ entry:
%54 = load i8* @sc, align 1
%55 = sext i8 %54 to i16
%56 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %57 = cmpxchg i16* %56, i16 %53, i16 %55 monotonic
+ %57 = cmpxchg i16* %56, i16 %53, i16 %55 monotonic monotonic
%58 = icmp eq i16 %57, %53
%59 = zext i1 %58 to i32
store i32 %59, i32* @ui, align 4
@@ -603,7 +603,7 @@ entry:
%62 = load i8* @sc, align 1
%63 = sext i8 %62 to i16
%64 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %65 = cmpxchg i16* %64, i16 %61, i16 %63 monotonic
+ %65 = cmpxchg i16* %64, i16 %61, i16 %63 monotonic monotonic
%66 = icmp eq i16 %65, %61
%67 = zext i1 %66 to i32
store i32 %67, i32* @ui, align 4
@@ -612,7 +612,7 @@ entry:
%70 = load i8* @sc, align 1
%71 = sext i8 %70 to i32
%72 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %73 = cmpxchg i32* %72, i32 %69, i32 %71 monotonic
+ %73 = cmpxchg i32* %72, i32 %69, i32 %71 monotonic monotonic
%74 = icmp eq i32 %73, %69
%75 = zext i1 %74 to i32
store i32 %75, i32* @ui, align 4
@@ -621,7 +621,7 @@ entry:
%78 = load i8* @sc, align 1
%79 = sext i8 %78 to i32
%80 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %81 = cmpxchg i32* %80, i32 %77, i32 %79 monotonic
+ %81 = cmpxchg i32* %80, i32 %77, i32 %79 monotonic monotonic
%82 = icmp eq i32 %81, %77
%83 = zext i1 %82 to i32
store i32 %83, i32* @ui, align 4
@@ -630,7 +630,7 @@ entry:
%86 = load i8* @sc, align 1
%87 = sext i8 %86 to i32
%88 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
- %89 = cmpxchg i32* %88, i32 %85, i32 %87 monotonic
+ %89 = cmpxchg i32* %88, i32 %85, i32 %87 monotonic monotonic
%90 = icmp eq i32 %89, %85
%91 = zext i1 %90 to i32
store i32 %91, i32* @ui, align 4
@@ -639,7 +639,7 @@ entry:
%94 = load i8* @sc, align 1
%95 = sext i8 %94 to i32
%96 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
- %97 = cmpxchg i32* %96, i32 %93, i32 %95 monotonic
+ %97 = cmpxchg i32* %96, i32 %93, i32 %95 monotonic monotonic
%98 = icmp eq i32 %97, %93
%99 = zext i1 %98 to i32
store i32 %99, i32* @ui, align 4
diff --git a/test/CodeGen/PowerPC/Atomics-64.ll b/test/CodeGen/PowerPC/Atomics-64.ll
index d35b848747..122b54e080 100644
--- a/test/CodeGen/PowerPC/Atomics-64.ll
+++ b/test/CodeGen/PowerPC/Atomics-64.ll
@@ -536,64 +536,64 @@ define void @test_compare_and_swap() nounwind {
entry:
%0 = load i8* @uc, align 1
%1 = load i8* @sc, align 1
- %2 = cmpxchg i8* @sc, i8 %0, i8 %1 monotonic
+ %2 = cmpxchg i8* @sc, i8 %0, i8 %1 monotonic monotonic
store i8 %2, i8* @sc, align 1
%3 = load i8* @uc, align 1
%4 = load i8* @sc, align 1
- %5 = cmpxchg i8* @uc, i8 %3, i8 %4 monotonic
+ %5 = cmpxchg i8* @uc, i8 %3, i8 %4 monotonic monotonic
store i8 %5, i8* @uc, align 1
%6 = load i8* @uc, align 1
%7 = zext i8 %6 to i16
%8 = load i8* @sc, align 1
%9 = sext i8 %8 to i16
%10 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %11 = cmpxchg i16* %10, i16 %7, i16 %9 monotonic
+ %11 = cmpxchg i16* %10, i16 %7, i16 %9 monotonic monotonic
store i16 %11, i16* @ss, align 2
%12 = load i8* @uc, align 1
%13 = zext i8 %12 to i16
%14 = load i8* @sc, align 1
%15 = sext i8 %14 to i16
%16 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %17 = cmpxchg i16* %16, i16 %13, i16 %15 monotonic
+ %17 = cmpxchg i16* %16, i16 %13, i16 %15 monotonic monotonic
store i16 %17, i16* @us, align 2
%18 = load i8* @uc, align 1
%19 = zext i8 %18 to i32
%20 = load i8* @sc, align 1
%21 = sext i8 %20 to i32
%22 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %23 = cmpxchg i32* %22, i32 %19, i32 %21 monotonic
+ %23 = cmpxchg i32* %22, i32 %19, i32 %21 monotonic monotonic
store i32 %23, i32* @si, align 4
%24 = load i8* @uc, align 1
%25 = zext i8 %24 to i32
%26 = load i8* @sc, align 1
%27 = sext i8 %26 to i32
%28 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %29 = cmpxchg i32* %28, i32 %25, i32 %27 monotonic
+ %29 = cmpxchg i32* %28, i32 %25, i32 %27 monotonic monotonic
store i32 %29, i32* @ui, align 4
%30 = load i8* @uc, align 1
%31 = zext i8 %30 to i64
%32 = load i8* @sc, align 1
%33 = sext i8 %32 to i64
%34 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %35 = cmpxchg i64* %34, i64 %31, i64 %33 monotonic
+ %35 = cmpxchg i64* %34, i64 %31, i64 %33 monotonic monotonic
store i64 %35, i64* @sl, align 8
%36 = load i8* @uc, align 1
%37 = zext i8 %36 to i64
%38 = load i8* @sc, align 1
%39 = sext i8 %38 to i64
%40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %41 = cmpxchg i64* %40, i64 %37, i64 %39 monotonic
+ %41 = cmpxchg i64* %40, i64 %37, i64 %39 monotonic monotonic
store i64 %41, i64* @ul, align 8
%42 = load i8* @uc, align 1
%43 = load i8* @sc, align 1
- %44 = cmpxchg i8* @sc, i8 %42, i8 %43 monotonic
+ %44 = cmpxchg i8* @sc, i8 %42, i8 %43 monotonic monotonic
%45 = icmp eq i8 %44, %42
%46 = zext i1 %45 to i8
%47 = zext i8 %46 to i32
store i32 %47, i32* @ui, align 4
%48 = load i8* @uc, align 1
%49 = load i8* @sc, align 1
- %50 = cmpxchg i8* @uc, i8 %48, i8 %49 monotonic
+ %50 = cmpxchg i8* @uc, i8 %48, i8 %49 monotonic monotonic
%51 = icmp eq i8 %50, %48
%52 = zext i1 %51 to i8
%53 = zext i8 %52 to i32
@@ -603,7 +603,7 @@ entry:
%56 = load i8* @sc, align 1
%57 = sext i8 %56 to i16
%58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %59 = cmpxchg i16* %58, i16 %55, i16 %57 monotonic
+ %59 = cmpxchg i16* %58, i16 %55, i16 %57 monotonic monotonic
%60 = icmp eq i16 %59, %55
%61 = zext i1 %60 to i8
%62 = zext i8 %61 to i32
@@ -613,7 +613,7 @@ entry:
%65 = load i8* @sc, align 1
%66 = sext i8 %65 to i16
%67 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %68 = cmpxchg i16* %67, i16 %64, i16 %66 monotonic
+ %68 = cmpxchg i16* %67, i16 %64, i16 %66 monotonic monotonic
%69 = icmp eq i16 %68, %64
%70 = zext i1 %69 to i8
%71 = zext i8 %70 to i32
@@ -623,7 +623,7 @@ entry:
%74 = load i8* @sc, align 1
%75 = sext i8 %74 to i32
%76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %77 = cmpxchg i32* %76, i32 %73, i32 %75 monotonic
+ %77 = cmpxchg i32* %76, i32 %73, i32 %75 monotonic monotonic
%78 = icmp eq i32 %77, %73
%79 = zext i1 %78 to i8
%80 = zext i8 %79 to i32
@@ -633,7 +633,7 @@ entry:
%83 = load i8* @sc, align 1
%84 = sext i8 %83 to i32
%85 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %86 = cmpxchg i32* %85, i32 %82, i32 %84 monotonic
+ %86 = cmpxchg i32* %85, i32 %82, i32 %84 monotonic monotonic
%87 = icmp eq i32 %86, %82
%88 = zext i1 %87 to i8
%89 = zext i8 %88 to i32
@@ -643,7 +643,7 @@ entry:
%92 = load i8* @sc, align 1
%93 = sext i8 %92 to i64
%94 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %95 = cmpxchg i64* %94, i64 %91, i64 %93 monotonic
+ %95 = cmpxchg i64* %94, i64 %91, i64 %93 monotonic monotonic
%96 = icmp eq i64 %95, %91
%97 = zext i1 %96 to i8
%98 = zext i8 %97 to i32
@@ -653,7 +653,7 @@ entry:
%101 = load i8* @sc, align 1
%102 = sext i8 %101 to i64
%103 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %104 = cmpxchg i64* %103, i64 %100, i64 %102 monotonic
+ %104 = cmpxchg i64* %103, i64 %100, i64 %102 monotonic monotonic
%105 = icmp eq i64 %104, %100
%106 = zext i1 %105 to i8
%107 = zext i8 %106 to i32
diff --git a/test/CodeGen/PowerPC/atomic-1.ll b/test/CodeGen/PowerPC/atomic-1.ll
index 1737916375..083df47e56 100644
--- a/test/CodeGen/PowerPC/atomic-1.ll
+++ b/test/CodeGen/PowerPC/atomic-1.ll
@@ -11,7 +11,7 @@ define i32 @exchange_and_add(i32* %mem, i32 %val) nounwind {
define i32 @exchange_and_cmp(i32* %mem) nounwind {
; CHECK-LABEL: exchange_and_cmp:
; CHECK: lwarx
- %tmp = cmpxchg i32* %mem, i32 0, i32 1 monotonic
+ %tmp = cmpxchg i32* %mem, i32 0, i32 1 monotonic monotonic
; CHECK: stwcx.
; CHECK: stwcx.
ret i32 %tmp
diff --git a/test/CodeGen/PowerPC/atomic-2.ll b/test/CodeGen/PowerPC/atomic-2.ll
index e56a779667..261335e81d 100644
--- a/test/CodeGen/PowerPC/atomic-2.ll
+++ b/test/CodeGen/PowerPC/atomic-2.ll
@@ -11,7 +11,7 @@ define i64 @exchange_and_add(i64* %mem, i64 %val) nounwind {
define i64 @exchange_and_cmp(i64* %mem) nounwind {
; CHECK-LABEL: exchange_and_cmp:
; CHECK: ldarx
- %tmp = cmpxchg i64* %mem, i64 0, i64 1 monotonic
+ %tmp = cmpxchg i64* %mem, i64 0, i64 1 monotonic monotonic
; CHECK: stdcx.
; CHECK: stdcx.
ret i64 %tmp
diff --git a/test/CodeGen/SPARC/atomics.ll b/test/CodeGen/SPARC/atomics.ll
index b10336c980..4e3e7ae6fd 100644
--- a/test/CodeGen/SPARC/atomics.ll
+++ b/test/CodeGen/SPARC/atomics.ll
@@ -38,7 +38,7 @@ entry:
define i32 @test_cmpxchg_i32(i32 %a, i32* %ptr) {
entry:
- %b = cmpxchg i32* %ptr, i32 %a, i32 123 monotonic
+ %b = cmpxchg i32* %ptr, i32 %a, i32 123 monotonic monotonic
ret i32 %b
}
@@ -48,7 +48,7 @@ entry:
define i64 @test_cmpxchg_i64(i64 %a, i64* %ptr) {
entry:
- %b = cmpxchg i64* %ptr, i64 %a, i64 123 monotonic
+ %b = cmpxchg i64* %ptr, i64 %a, i64 123 monotonic monotonic
ret i64 %b
}
diff --git a/test/CodeGen/SystemZ/cmpxchg-01.ll b/test/CodeGen/SystemZ/cmpxchg-01.ll
index d5ea977869..bb0b18ad57 100644
--- a/test/CodeGen/SystemZ/cmpxchg-01.ll
+++ b/test/CodeGen/SystemZ/cmpxchg-01.ll
@@ -32,7 +32,7 @@ define i8 @f1(i8 %dummy, i8 *%src, i8 %cmp, i8 %swap) {
; CHECK-SHIFT: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
; CHECK-SHIFT: rll
; CHECK-SHIFT: rll {{%r[0-9]+}}, %r5, -8([[NEGSHIFT]])
- %res = cmpxchg i8 *%src, i8 %cmp, i8 %swap seq_cst
+ %res = cmpxchg i8 *%src, i8 %cmp, i8 %swap seq_cst seq_cst
ret i8 %res
}
@@ -50,6 +50,6 @@ define i8 @f2(i8 *%src) {
; CHECK-SHIFT: risbg
; CHECK-SHIFT: risbg [[SWAP]], {{%r[0-9]+}}, 32, 55, 0
; CHECK-SHIFT: br %r14
- %res = cmpxchg i8 *%src, i8 42, i8 88 seq_cst
+ %res = cmpxchg i8 *%src, i8 42, i8 88 seq_cst seq_cst
ret i8 %res
}
diff --git a/test/CodeGen/SystemZ/cmpxchg-02.ll b/test/CodeGen/SystemZ/cmpxchg-02.ll
index 08c79d717c..8d46a8c073 100644
--- a/test/CodeGen/SystemZ/cmpxchg-02.ll
+++ b/test/CodeGen/SystemZ/cmpxchg-02.ll
@@ -32,7 +32,7 @@ define i16 @f1(i16 %dummy, i16 *%src, i16 %cmp, i16 %swap) {
; CHECK-SHIFT: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
; CHECK-SHIFT: rll
; CHECK-SHIFT: rll {{%r[0-9]+}}, %r5, -16([[NEGSHIFT]])
- %res = cmpxchg i16 *%src, i16 %cmp, i16 %swap seq_cst
+ %res = cmpxchg i16 *%src, i16 %cmp, i16 %swap seq_cst seq_cst
ret i16 %res
}
@@ -50,6 +50,6 @@ define i16 @f2(i16 *%src) {
; CHECK-SHIFT: risbg
; CHECK-SHIFT: risbg [[SWAP]], {{%r[0-9]+}}, 32, 47, 0
; CHECK-SHIFT: br %r14
- %res = cmpxchg i16 *%src, i16 42, i16 88 seq_cst
+ %res = cmpxchg i16 *%src, i16 42, i16 88 seq_cst seq_cst
ret i16 %res
}
diff --git a/test/CodeGen/SystemZ/cmpxchg-03.ll b/test/CodeGen/SystemZ/cmpxchg-03.ll
index 3917979ac2..f6a2ad0b69 100644
--- a/test/CodeGen/SystemZ/cmpxchg-03.ll
+++ b/test/CodeGen/SystemZ/cmpxchg-03.ll
@@ -7,7 +7,7 @@ define i32 @f1(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK-LABEL: f1:
; CHECK: cs %r2, %r3, 0(%r4)
; CHECK: br %r14
- %val = cmpxchg i32 *%src, i32 %cmp, i32 %swap seq_cst
+ %val = cmpxchg i32 *%src, i32 %cmp, i32 %swap seq_cst seq_cst
ret i32 %val
}
@@ -17,7 +17,7 @@ define i32 @f2(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: cs %r2, %r3, 4092(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 1023
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
ret i32 %val
}
@@ -27,7 +27,7 @@ define i32 @f3(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: csy %r2, %r3, 4096(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 1024
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
ret i32 %val
}
@@ -37,7 +37,7 @@ define i32 @f4(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: csy %r2, %r3, 524284(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 131071
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
ret i32 %val
}
@@ -49,7 +49,7 @@ define i32 @f5(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: cs %r2, %r3, 0(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 131072
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
ret i32 %val
}
@@ -59,7 +59,7 @@ define i32 @f6(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: csy %r2, %r3, -4(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 -1
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
ret i32 %val
}
@@ -69,7 +69,7 @@ define i32 @f7(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: csy %r2, %r3, -524288(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 -131072
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
ret i32 %val
}
@@ -81,7 +81,7 @@ define i32 @f8(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: cs %r2, %r3, 0(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 -131073
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
ret i32 %val
}
@@ -93,7 +93,7 @@ define i32 @f9(i32 %cmp, i32 %swap, i64 %src, i64 %index) {
; CHECK: br %r14
%add1 = add i64 %src, %index
%ptr = inttoptr i64 %add1 to i32 *
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
ret i32 %val
}
@@ -106,7 +106,7 @@ define i32 @f10(i32 %cmp, i32 %swap, i64 %src, i64 %index) {
%add1 = add i64 %src, %index
%add2 = add i64 %add1, 4096
%ptr = inttoptr i64 %add2 to i32 *
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
ret i32 %val
}
@@ -116,7 +116,7 @@ define i32 @f11(i32 %dummy, i32 %swap, i32 *%ptr) {
; CHECK: lhi %r2, 1001
; CHECK: cs %r2, %r3, 0(%r4)
; CHECK: br %r14
- %val = cmpxchg i32 *%ptr, i32 1001, i32 %swap seq_cst
+ %val = cmpxchg i32 *%ptr, i32 1001, i32 %swap seq_cst seq_cst
ret i32 %val
}
@@ -126,6 +126,6 @@ define i32 @f12(i32 %cmp, i32 *%ptr) {
; CHECK: lhi [[SWAP:%r[0-9]+]], 1002
; CHECK: cs %r2, [[SWAP]], 0(%r3)
; CHECK: br %r14
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 1002 seq_cst
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 1002 seq_cst seq_cst
ret i32 %val
}
diff --git a/test/CodeGen/SystemZ/cmpxchg-04.ll b/test/CodeGen/SystemZ/cmpxchg-04.ll
index f58868f04f..069bad6514 100644
--- a/test/CodeGen/SystemZ/cmpxchg-04.ll
+++ b/test/CodeGen/SystemZ/cmpxchg-04.ll
@@ -7,7 +7,7 @@ define i64 @f1(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK-LABEL: f1:
; CHECK: csg %r2, %r3, 0(%r4)
; CHECK: br %r14
- %val = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst
+ %val = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst seq_cst
ret i64 %val
}
@@ -17,7 +17,7 @@ define i64 @f2(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK: csg %r2, %r3, 524280(%r4)
; CHECK: br %r14
%ptr = getelementptr i64 *%src, i64 65535
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
ret i64 %val
}
@@ -29,7 +29,7 @@ define i64 @f3(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK: csg %r2, %r3, 0(%r4)
; CHECK: br %r14
%ptr = getelementptr i64 *%src, i64 65536
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
ret i64 %val
}
@@ -39,7 +39,7 @@ define i64 @f4(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK: csg %r2, %r3, -8(%r4)
; CHECK: br %r14
%ptr = getelementptr i64 *%src, i64 -1
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
ret i64 %val
}
@@ -49,7 +49,7 @@ define i64 @f5(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK: csg %r2, %r3, -524288(%r4)
; CHECK: br %r14
%ptr = getelementptr i64 *%src, i64 -65536
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
ret i64 %val
}
@@ -61,7 +61,7 @@ define i64 @f6(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK: csg %r2, %r3, 0(%r4)
; CHECK: br %r14
%ptr = getelementptr i64 *%src, i64 -65537
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
ret i64 %val
}
@@ -73,7 +73,7 @@ define i64 @f7(i64 %cmp, i64 %swap, i64 %src, i64 %index) {
; CHECK: br %r14
%add1 = add i64 %src, %index
%ptr = inttoptr i64 %add1 to i64 *
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
ret i64 %val
}
@@ -83,7 +83,7 @@ define i64 @f8(i64 %dummy, i64 %swap, i64 *%ptr) {
; CHECK: lghi %r2, 1001
; CHECK: csg %r2, %r3, 0(%r4)
; CHECK: br %r14
- %val = cmpxchg i64 *%ptr, i64 1001, i64 %swap seq_cst
+ %val = cmpxchg i64 *%ptr, i64 1001, i64 %swap seq_cst seq_cst
ret i64 %val
}
@@ -93,6 +93,6 @@ define i64 @f9(i64 %cmp, i64 *%ptr) {
; CHECK: lghi [[SWAP:%r[0-9]+]], 1002
; CHECK: csg %r2, [[SWAP]], 0(%r3)
; CHECK: br %r14
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 1002 seq_cst
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 1002 seq_cst seq_cst
ret i64 %val
}
diff --git a/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll b/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll
index 0e4118a2a9..f69cedc4d3 100644
--- a/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll
+++ b/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll
@@ -18,7 +18,7 @@ entry:
loop:
; CHECK: lock
; CHECK-NEXT: cmpxchg8b
- %r = cmpxchg i64* %ptr, i64 0, i64 1 monotonic
+ %r = cmpxchg i64* %ptr, i64 0, i64 1 monotonic monotonic
%stored1 = icmp eq i64 %r, 0
br i1 %stored1, label %loop, label %continue
continue:
diff --git a/test/CodeGen/X86/Atomics-64.ll b/test/CodeGen/X86/Atomics-64.ll
index 8b0a349a8b..c274688504 100644
--- a/test/CodeGen/X86/Atomics-64.ll
+++ b/test/CodeGen/X86/Atomics-64.ll
@@ -704,7 +704,7 @@ entry:
%3 = zext i8 %2 to i32
%4 = trunc i32 %3 to i8
%5 = trunc i32 %1 to i8
- %6 = cmpxchg i8* @sc, i8 %4, i8 %5 monotonic
+ %6 = cmpxchg i8* @sc, i8 %4, i8 %5 monotonic monotonic
store i8 %6, i8* @sc, align 1
%7 = load i8* @sc, align 1
%8 = zext i8 %7 to i32
@@ -712,7 +712,7 @@ entry:
%10 = zext i8 %9 to i32
%11 = trunc i32 %10 to i8
%12 = trunc i32 %8 to i8
- %13 = cmpxchg i8* @uc, i8 %11, i8 %12 monotonic
+ %13 = cmpxchg i8* @uc, i8 %11, i8 %12 monotonic monotonic
store i8 %13, i8* @uc, align 1
%14 = load i8* @sc, align 1
%15 = sext i8 %14 to i16
@@ -722,7 +722,7 @@ entry:
%19 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%20 = trunc i32 %18 to i16
%21 = trunc i32 %16 to i16
- %22 = cmpxchg i16* %19, i16 %20, i16 %21 monotonic
+ %22 = cmpxchg i16* %19, i16 %20, i16 %21 monotonic monotonic
store i16 %22, i16* @ss, align 2
%23 = load i8* @sc, align 1
%24 = sext i8 %23 to i16
@@ -732,49 +732,49 @@ entry:
%28 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%29 = trunc i32 %27 to i16
%30 = trunc i32 %25 to i16
- %31 = cmpxchg i16* %28, i16 %29, i16 %30 monotonic
+ %31 = cmpxchg i16* %28, i16 %29, i16 %30 monotonic monotonic
store i16 %31, i16* @us, align 2
%32 = load i8* @sc, align 1
%33 = sext i8 %32 to i32
%34 = load i8* @uc, align 1
%35 = zext i8 %34 to i32
%36 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %37 = cmpxchg i32* %36, i32 %35, i32 %33 monotonic
+ %37 = cmpxchg i32* %36, i32 %35, i32 %33 monotonic monotonic
store i32 %37, i32* @si, align 4
%38 = load i8* @sc, align 1
%39 = sext i8 %38 to i32
%40 = load i8* @uc, align 1
%41 = zext i8 %40 to i32
%42 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %43 = cmpxchg i32* %42, i32 %41, i32 %39 monotonic
+ %43 = cmpxchg i32* %42, i32 %41, i32 %39 monotonic monotonic
store i32 %43, i32* @ui, align 4
%44 = load i8* @sc, align 1
%45 = sext i8 %44 to i64
%46 = load i8* @uc, align 1
%47 = zext i8 %46 to i64
%48 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %49 = cmpxchg i64* %48, i64 %47, i64 %45 monotonic
+ %49 = cmpxchg i64* %48, i64 %47, i64 %45 monotonic monotonic
store i64 %49, i64* @sl, align 8
%50 = load i8* @sc, align 1
%51 = sext i8 %50 to i64
%52 = load i8* @uc, align 1
%53 = zext i8 %52 to i64
%54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %55 = cmpxchg i64* %54, i64 %53, i64 %51 monotonic
+ %55 = cmpxchg i64* %54, i64 %53, i64 %51 monotonic monotonic
store i64 %55, i64* @ul, align 8
%56 = load i8* @sc, align 1
%57 = sext i8 %56 to i64
%58 = load i8* @uc, align 1
%59 = zext i8 %58 to i64
%60 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
- %61 = cmpxchg i64* %60, i64 %59, i64 %57 monotonic
+ %61 = cmpxchg i64* %60, i64 %59, i64 %57 monotonic monotonic
store i64 %61, i64* @sll, align 8
%62 = load i8* @sc, align 1
%63 = sext i8 %62 to i64
%64 = load i8* @uc, align 1
%65 = zext i8 %64 to i64
%66 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
- %67 = cmpxchg i64* %66, i64 %65, i64 %63 monotonic
+ %67 = cmpxchg i64* %66, i64 %65, i64 %63 monotonic monotonic
store i64 %67, i64* @ull, align 8
%68 = load i8* @sc, align 1
%69 = zext i8 %68 to i32
@@ -782,7 +782,7 @@ entry:
%71 = zext i8 %70 to i32
%72 = trunc i32 %71 to i8
%73 = trunc i32 %69 to i8
- %74 = cmpxchg i8* @sc, i8 %72, i8 %73 monotonic
+ %74 = cmpxchg i8* @sc, i8 %72, i8 %73 monotonic monotonic
%75 = icmp eq i8 %74, %72
%76 = zext i1 %75 to i8
%77 = zext i8 %76 to i32
@@ -793,7 +793,7 @@ entry:
%81 = zext i8 %80 to i32
%82 = trunc i32 %81 to i8
%83 = trunc i32 %79 to i8
- %84 = cmpxchg i8* @uc, i8 %82, i8 %83 monotonic
+ %84 = cmpxchg i8* @uc, i8 %82, i8 %83 monotonic monotonic
%85 = icmp eq i8 %84, %82
%86 = zext i1 %85 to i8
%87 = zext i8 %86 to i32
@@ -805,7 +805,7 @@ entry:
%92 = zext i8 %91 to i32
%93 = trunc i32 %92 to i8
%94 = trunc i32 %90 to i8
- %95 = cmpxchg i8* bitcast (i16* @ss to i8*), i8 %93, i8 %94 monotonic
+ %95 = cmpxchg i8* bitcast (i16* @ss to i8*), i8 %93, i8 %94 monotonic monotonic
%96 = icmp eq i8 %95, %93
%97 = zext i1 %96 to i8
%98 = zext i8 %97 to i32
@@ -817,7 +817,7 @@ entry:
%103 = zext i8 %102 to i32
%104 = trunc i32 %103 to i8
%105 = trunc i32 %101 to i8
- %106 = cmpxchg i8* bitcast (i16* @us to i8*), i8 %104, i8 %105 monotonic
+ %106 = cmpxchg i8* bitcast (i16* @us to i8*), i8 %104, i8 %105 monotonic monotonic
%107 = icmp eq i8 %106, %104
%108 = zext i1 %107 to i8
%109 = zext i8 %108 to i32
@@ -828,7 +828,7 @@ entry:
%113 = zext i8 %112 to i32
%114 = trunc i32 %113 to i8
%115 = trunc i32 %111 to i8
- %116 = cmpxchg i8* bitcast (i32* @si to i8*), i8 %114, i8 %115 monotonic
+ %116 = cmpxchg i8* bitcast (i32* @si to i8*), i8 %114, i8 %115 monotonic monotonic
%117 = icmp eq i8 %116, %114
%118 = zext i1 %117 to i8
%119 = zext i8 %118 to i32
@@ -839,7 +839,7 @@ entry:
%123 = zext i8 %122 to i32
%124 = trunc i32 %123 to i8
%125 = trunc i32 %121 to i8
- %126 = cmpxchg i8* bitcast (i32* @ui to i8*), i8 %124, i8 %125 monotonic
+ %126 = cmpxchg i8* bitcast (i32* @ui to i8*), i8 %124, i8 %125 monotonic monotonic
%127 = icmp eq i8 %126, %124
%128 = zext i1 %127 to i8
%129 = zext i8 %128 to i32
@@ -850,7 +850,7 @@ entry:
%133 = zext i8 %132 to i64
%134 = trunc i64 %133 to i8
%135 = trunc i64 %131 to i8
- %136 = cmpxchg i8* bitcast (i64* @sl to i8*), i8 %134, i8 %135 monotonic
+ %136 = cmpxchg i8* bitcast (i64* @sl to i8*), i8 %134, i8 %135 monotonic monotonic
%137 = icmp eq i8 %136, %134
%138 = zext i1 %137 to i8
%139 = zext i8 %138 to i32
@@ -861,7 +861,7 @@ entry:
%143 = zext i8 %142 to i64
%144 = trunc i64 %143 to i8
%145 = trunc i64 %141 to i8
- %146 = cmpxchg i8* bitcast (i64* @ul to i8*), i8 %144, i8 %145 monotonic
+ %146 = cmpxchg i8* bitcast (i64* @ul to i8*), i8 %144, i8 %145 monotonic monotonic
%147 = icmp eq i8 %146, %144
%148 = zext i1 %147 to i8
%149 = zext i8 %148 to i32
@@ -872,7 +872,7 @@ entry:
%153 = zext i8 %152 to i64
%154 = trunc i64 %153 to i8
%155 = trunc i64 %151 to i8
- %156 = cmpxchg i8* bitcast (i64* @sll to i8*), i8 %154, i8 %155 monotonic
+ %156 = cmpxchg i8* bitcast (i64* @sll to i8*), i8 %154, i8 %155 monotonic monotonic
%157 = icmp eq i8 %156, %154
%158 = zext i1 %157 to i8
%159 = zext i8 %158 to i32
@@ -883,7 +883,7 @@ entry:
%163 = zext i8 %162 to i64
%164 = trunc i64 %163 to i8
%165 = trunc i64 %161 to i8
- %166 = cmpxchg i8* bitcast (i64* @ull to i8*), i8 %164, i8 %165 monotonic
+ %166 = cmpxchg i8* bitcast (i64* @ull to i8*), i8 %164, i8 %165 monotonic monotonic
%167 = icmp eq i8 %166, %164
%168 = zext i1 %167 to i8
%169 = zext i8 %168 to i32
diff --git a/test/CodeGen/X86/atomic16.ll b/test/CodeGen/X86/atomic16.ll
index ec2887e29f..45d3ff46a0 100644
--- a/test/CodeGen/X86/atomic16.ll
+++ b/test/CodeGen/X86/atomic16.ll
@@ -217,7 +217,7 @@ define void @atomic_fetch_umin16(i16 %x) nounwind {
}
define void @atomic_fetch_cmpxchg16() nounwind {
- %t1 = cmpxchg i16* @sc16, i16 0, i16 1 acquire
+ %t1 = cmpxchg i16* @sc16, i16 0, i16 1 acquire acquire
; X64: lock
; X64: cmpxchgw
; X32: lock
diff --git a/test/CodeGen/X86/atomic32.ll b/test/CodeGen/X86/atomic32.ll
index 3cb9ca1c76..474c0e6a98 100644
--- a/test/CodeGen/X86/atomic32.ll
+++ b/test/CodeGen/X86/atomic32.ll
@@ -243,7 +243,7 @@ define void @atomic_fetch_umin32(i32 %x) nounwind {
}
define void @atomic_fetch_cmpxchg32() nounwind {
- %t1 = cmpxchg i32* @sc32, i32 0, i32 1 acquire
+ %t1 = cmpxchg i32* @sc32, i32 0, i32 1 acquire acquire
; X64: lock
; X64: cmpxchgl
; X32: lock
diff --git a/test/CodeGen/X86/atomic64.ll b/test/CodeGen/X86/atomic64.ll
index aa00045575..4f55edc056 100644
--- a/test/CodeGen/X86/atomic64.ll
+++ b/test/CodeGen/X86/atomic64.ll
@@ -183,7 +183,7 @@ define void @atomic_fetch_umin64(i64 %x) nounwind {
}
define void @atomic_fetch_cmpxchg64() nounwind {
- %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire
+ %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire acquire
; X64: lock
; X64: cmpxchgq
; X32: lock
diff --git a/test/CodeGen/X86/atomic6432.ll b/test/CodeGen/X86/atomic6432.ll
index 31e66c876e..c0f7267abe 100644
--- a/test/CodeGen/X86/atomic6432.ll
+++ b/test/CodeGen/X86/atomic6432.ll
@@ -184,7 +184,7 @@ define void @atomic_fetch_umin64(i64 %x) nounwind {
}
define void @atomic_fetch_cmpxchg64() nounwind {
- %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire
+ %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire acquire
; X32: lock
; X32: cmpxchg8b
ret void
diff --git a/test/CodeGen/X86/atomic8.ll b/test/CodeGen/X86/atomic8.ll
index 3278ed1f50..203b26f0ab 100644
--- a/test/CodeGen/X86/atomic8.ll
+++ b/test/CodeGen/X86/atomic8.ll
@@ -217,7 +217,7 @@ define void @atomic_fetch_umin8(i8 %x) nounwind {
}
define void @atomic_fetch_cmpxchg8() nounwind {
- %t1 = cmpxchg i8* @sc8, i8 0, i8 1 acquire
+ %t1 = cmpxchg i8* @sc8, i8 0, i8 1 acquire acquire
; X64: lock
; X64: cmpxchgb
; X32: lock
diff --git a/test/CodeGen/X86/atomic_op.ll b/test/CodeGen/X86/atomic_op.ll
index a378d6e8d6..b3045ed645 100644
--- a/test/CodeGen/X86/atomic_op.ll
+++ b/test/CodeGen/X86/atomic_op.ll
@@ -101,11 +101,11 @@ entry:
%neg1 = sub i32 0, 10 ; <i32> [#uses=1]
; CHECK: lock
; CHECK: cmpxchgl
- %16 = cmpxchg i32* %val2, i32 %neg1, i32 1 monotonic
+ %16 = cmpxchg i32* %val2, i32 %neg1, i32 1 monotonic monotonic
store i32 %16, i32* %old
; CHECK: lock
; CHECK: cmpxchgl
- %17 = cmpxchg i32* %val2, i32 1976, i32 1 monotonic
+ %17 = cmpxchg i32* %val2, i32 1976, i32 1 monotonic monotonic
store i32 %17, i32* %old
; CHECK: movl [[R17atomic:.*]], %eax
; CHECK: movl $1401, %[[R17mask:[a-z]*]]
@@ -133,6 +133,6 @@ entry:
; CHECK: lock
; CHECK: cmpxchgl %{{.*}}, %gs:(%{{.*}})
- %0 = cmpxchg i32 addrspace(256)* %P, i32 0, i32 1 monotonic
+ %0 = cmpxchg i32 addrspace(256)* %P, i32 0, i32 1 monotonic monotonic
ret void
}
diff --git a/test/CodeGen/X86/cmpxchg16b.ll b/test/CodeGen/X86/cmpxchg16b.ll
index edbd0bc9de..1d5bb85f8d 100644
--- a/test/CodeGen/X86/cmpxchg16b.ll
+++ b/test/CodeGen/X86/cmpxchg16b.ll
@@ -6,7 +6,7 @@ entry:
; CHECK: movl $1, %ebx
; CHECK: lock
; CHECK-NEXT: cmpxchg16b
- %r = cmpxchg i128* %p, i128 0, i128 1 seq_cst
+ %r = cmpxchg i128* %p, i128 0, i128 1 seq_cst seq_cst
ret void
}
diff --git a/test/CodeGen/X86/coalescer-remat.ll b/test/CodeGen/X86/coalescer-remat.ll
index eb7b7a8738..468b70bdc8 100644
--- a/test/CodeGen/X86/coalescer-remat.ll
+++ b/test/CodeGen/X86/coalescer-remat.ll
@@ -5,7 +5,7 @@
define i32 @main() nounwind {
entry:
- %0 = cmpxchg i64* @val, i64 0, i64 1 monotonic
+ %0 = cmpxchg i64* @val, i64 0, i64 1 monotonic monotonic
%1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([7 x i8]* @"\01LC", i32 0, i64 0), i64 %0) nounwind
ret i32 0
}
diff --git a/test/CodeGen/X86/nocx16.ll b/test/CodeGen/X86/nocx16.ll
index cceaac4712..8b995dafa7 100644
--- a/test/CodeGen/X86/nocx16.ll
+++ b/test/CodeGen/X86/nocx16.ll
@@ -2,7 +2,7 @@
define void @test(i128* %a) nounwind {
entry:
; CHECK: __sync_val_compare_and_swap_16
- %0 = cmpxchg i128* %a, i128 1, i128 1 seq_cst
+ %0 = cmpxchg i128* %a, i128 1, i128 1 seq_cst seq_cst
; CHECK: __sync_lock_test_and_set_16
%1 = atomicrmw xchg i128* %a, i128 1 seq_cst
; CHECK: __sync_fetch_and_add_16
diff --git a/test/Instrumentation/AddressSanitizer/test64.ll b/test/Instrumentation/AddressSanitizer/test64.ll
index 5b30fae714..6390644422 100644
--- a/test/Instrumentation/AddressSanitizer/test64.ll
+++ b/test/Instrumentation/AddressSanitizer/test64.ll
@@ -26,7 +26,7 @@ entry:
define void @example_cmpxchg(i64* %ptr, i64 %compare_to, i64 %new_value) nounwind uwtable sanitize_address {
entry:
- %0 = cmpxchg i64* %ptr, i64 %compare_to, i64 %new_value seq_cst
+ %0 = cmpxchg i64* %ptr, i64 %compare_to, i64 %new_value seq_cst seq_cst
ret void
}
diff --git a/test/Instrumentation/MemorySanitizer/atomics.ll b/test/Instrumentation/MemorySanitizer/atomics.ll
index ff0245262c..98697d7038 100644
--- a/test/Instrumentation/MemorySanitizer/atomics.ll
+++ b/test/Instrumentation/MemorySanitizer/atomics.ll
@@ -37,7 +37,7 @@ entry:
define i32 @Cmpxchg(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
- %0 = cmpxchg i32* %p, i32 %a, i32 %b seq_cst
+ %0 = cmpxchg i32* %p, i32 %a, i32 %b seq_cst seq_cst
ret i32 %0
}
@@ -46,16 +46,16 @@ entry:
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
-; CHECK: cmpxchg {{.*}} seq_cst
+; CHECK: cmpxchg {{.*}} seq_cst seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
-; relaxed cmpxchg: bump up to "release"
+; relaxed cmpxchg: bump up to "release monotonic"
define i32 @CmpxchgMonotonic(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
- %0 = cmpxchg i32* %p, i32 %a, i32 %b monotonic
+ %0 = cmpxchg i32* %p, i32 %a, i32 %b monotonic monotonic
ret i32 %0
}
@@ -64,7 +64,7 @@ entry:
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
-; CHECK: cmpxchg {{.*}} release
+; CHECK: cmpxchg {{.*}} release monotonic
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
diff --git a/test/Instrumentation/ThreadSanitizer/atomic.ll b/test/Instrumentation/ThreadSanitizer/atomic.ll
index 70b6cbbf31..e40268f97b 100644
--- a/test/Instrumentation/ThreadSanitizer/atomic.ll
+++ b/test/Instrumentation/ThreadSanitizer/atomic.ll
@@ -348,7 +348,7 @@ entry:
define void @atomic8_cas_monotonic(i8* %a) nounwind uwtable {
entry:
- cmpxchg i8* %a, i8 0, i8 1 monotonic
+ cmpxchg i8* %a, i8 0, i8 1 monotonic monotonic
ret void
}
; CHECK: atomic8_cas_monotonic
@@ -356,7 +356,7 @@ entry:
define void @atomic8_cas_acquire(i8* %a) nounwind uwtable {
entry:
- cmpxchg i8* %a, i8 0, i8 1 acquire
+ cmpxchg i8* %a, i8 0, i8 1 acquire acquire
ret void
}
; CHECK: atomic8_cas_acquire
@@ -364,7 +364,7 @@ entry:
define void @atomic8_cas_release(i8* %a) nounwind uwtable {
entry:
- cmpxchg i8* %a, i8 0, i8 1 release
+ cmpxchg i8* %a, i8 0, i8 1 release monotonic
ret void
}
; CHECK: atomic8_cas_release
@@ -372,7 +372,7 @@ entry:
define void @atomic8_cas_acq_rel(i8* %a) nounwind uwtable {
entry:
- cmpxchg i8* %a, i8 0, i8 1 acq_rel
+ cmpxchg i8* %a, i8 0, i8 1 acq_rel acquire
ret void
}
; CHECK: atomic8_cas_acq_rel
@@ -380,7 +380,7 @@ entry:
define void @atomic8_cas_seq_cst(i8* %a) nounwind uwtable {
entry:
- cmpxchg i8* %a, i8 0, i8 1 seq_cst
+ cmpxchg i8* %a, i8 0, i8 1 seq_cst seq_cst
ret void
}
; CHECK: atomic8_cas_seq_cst
@@ -732,7 +732,7 @@ entry:
define void @atomic16_cas_monotonic(i16* %a) nounwind uwtable {
entry:
- cmpxchg i16* %a, i16 0, i16 1 monotonic
+ cmpxchg i16* %a, i16 0, i16 1 monotonic monotonic
ret void
}
; CHECK: atomic16_cas_monotonic
@@ -740,7 +740,7 @@ entry:
define void @atomic16_cas_acquire(i16* %a) nounwind uwtable {
entry:
- cmpxchg i16* %a, i16 0, i16 1 acquire
+ cmpxchg i16* %a, i16 0, i16 1 acquire acquire
ret void
}
; CHECK: atomic16_cas_acquire
@@ -748,7 +748,7 @@ entry:
define void @atomic16_cas_release(i16* %a) nounwind uwtable {
entry:
- cmpxchg i16* %a, i16 0, i16 1 release
+ cmpxchg i16* %a, i16 0, i16 1 release monotonic
ret void
}
; CHECK: atomic16_cas_release
@@ -756,7 +756,7 @@ entry:
define void @atomic16_cas_acq_rel(i16* %a) nounwind uwtable {
entry:
- cmpxchg i16* %a, i16 0, i16 1 acq_rel
+ cmpxchg i16* %a, i16 0, i16 1 acq_rel acquire
ret void
}
; CHECK: atomic16_cas_acq_rel
@@ -764,7 +764,7 @@ entry:
define void @atomic16_cas_seq_cst(i16* %a) nounwind uwtable {
entry:
- cmpxchg i16* %a, i16 0, i16 1 seq_cst
+ cmpxchg i16* %a, i16 0, i16 1 seq_cst seq_cst
ret void
}
; CHECK: atomic16_cas_seq_cst
@@ -1116,7 +1116,7 @@ entry:
define void @atomic32_cas_monotonic(i32* %a) nounwind uwtable {
entry:
- cmpxchg i32* %a, i32 0, i32 1 monotonic
+ cmpxchg i32* %a, i32 0, i32 1 monotonic monotonic
ret void
}
; CHECK: atomic32_cas_monotonic
@@ -1124,7 +1124,7 @@ entry:
define void @atomic32_cas_acquire(i32* %a) nounwind uwtable {
entry:
- cmpxchg i32* %a, i32 0, i32 1 acquire
+ cmpxchg i32* %a, i32 0, i32 1 acquire acquire
ret void
}
; CHECK: atomic32_cas_acquire
@@ -1132,7 +1132,7 @@ entry:
define void @atomic32_cas_release(i32* %a) nounwind uwtable {
entry:
- cmpxchg i32* %a, i32 0, i32 1 release
+ cmpxchg i32* %a, i32 0, i32 1 release monotonic
ret void
}
; CHECK: atomic32_cas_release
@@ -1140,7 +1140,7 @@ entry:
define void @atomic32_cas_acq_rel(i32* %a) nounwind uwtable {
entry:
- cmpxchg i32* %a, i32 0, i32 1 acq_rel
+ cmpxchg i32* %a, i32 0, i32 1 acq_rel acquire
ret void
}
; CHECK: atomic32_cas_acq_rel
@@ -1148,7 +1148,7 @@ entry:
define void @atomic32_cas_seq_cst(i32* %a) nounwind uwtable {
entry:
- cmpxchg i32* %a, i32 0, i32 1 seq_cst
+ cmpxchg i32* %a, i32 0, i32 1 seq_cst seq_cst
ret void
}
; CHECK: atomic32_cas_seq_cst
@@ -1500,7 +1500,7 @@ entry:
define void @atomic64_cas_monotonic(i64* %a) nounwind uwtable {
entry:
- cmpxchg i64* %a, i64 0, i64 1 monotonic
+ cmpxchg i64* %a, i64 0, i64 1 monotonic monotonic
ret void
}
; CHECK: atomic64_cas_monotonic
@@ -1508,7 +1508,7 @@ entry:
define void @atomic64_cas_acquire(i64* %a) nounwind uwtable {
entry:
- cmpxchg i64* %a, i64 0, i64 1 acquire
+ cmpxchg i64* %a, i64 0, i64 1 acquire acquire
ret void
}
; CHECK: atomic64_cas_acquire
@@ -1516,7 +1516,7 @@ entry:
define void @atomic64_cas_release(i64* %a) nounwind uwtable {
entry:
- cmpxchg i64* %a, i64 0, i64 1 release
+ cmpxchg i64* %a, i64 0, i64 1 release monotonic
ret void
}
; CHECK: atomic64_cas_release
@@ -1524,7 +1524,7 @@ entry:
define void @atomic64_cas_acq_rel(i64* %a) nounwind uwtable {
entry:
- cmpxchg i64* %a, i64 0, i64 1 acq_rel
+ cmpxchg i64* %a, i64 0, i64 1 acq_rel acquire
ret void
}
; CHECK: atomic64_cas_acq_rel
@@ -1532,7 +1532,7 @@ entry:
define void @atomic64_cas_seq_cst(i64* %a) nounwind uwtable {
entry:
- cmpxchg i64* %a, i64 0, i64 1 seq_cst
+ cmpxchg i64* %a, i64 0, i64 1 seq_cst seq_cst
ret void
}
; CHECK: atomic64_cas_seq_cst
@@ -1884,7 +1884,7 @@ entry:
define void @atomic128_cas_monotonic(i128* %a) nounwind uwtable {
entry:
- cmpxchg i128* %a, i128 0, i128 1 monotonic
+ cmpxchg i128* %a, i128 0, i128 1 monotonic monotonic
ret void
}
; CHECK: atomic128_cas_monotonic
@@ -1892,7 +1892,7 @@ entry:
define void @atomic128_cas_acquire(i128* %a) nounwind uwtable {
entry:
- cmpxchg i128* %a, i128 0, i128 1 acquire
+ cmpxchg i128* %a, i128 0, i128 1 acquire acquire
ret void
}
; CHECK: atomic128_cas_acquire
@@ -1900,7 +1900,7 @@ entry:
define void @atomic128_cas_release(i128* %a) nounwind uwtable {
entry:
- cmpxchg i128* %a, i128 0, i128 1 release
+ cmpxchg i128* %a, i128 0, i128 1 release monotonic
ret void
}
; CHECK: atomic128_cas_release
@@ -1908,7 +1908,7 @@ entry:
define void @atomic128_cas_acq_rel(i128* %a) nounwind uwtable {
entry:
- cmpxchg i128* %a, i128 0, i128 1 acq_rel
+ cmpxchg i128* %a, i128 0, i128 1 acq_rel acquire
ret void
}
; CHECK: atomic128_cas_acq_rel
@@ -1916,7 +1916,7 @@ entry:
define void @atomic128_cas_seq_cst(i128* %a) nounwind uwtable {
entry:
- cmpxchg i128* %a, i128 0, i128 1 seq_cst
+ cmpxchg i128* %a, i128 0, i128 1 seq_cst seq_cst
ret void
}
; CHECK: atomic128_cas_seq_cst
diff --git a/test/Transforms/LowerAtomic/atomic-swap.ll b/test/Transforms/LowerAtomic/atomic-swap.ll
index 4331677764..c319834b62 100644
--- a/test/Transforms/LowerAtomic/atomic-swap.ll
+++ b/test/Transforms/LowerAtomic/atomic-swap.ll
@@ -3,7 +3,7 @@
define i8 @cmpswap() {
; CHECK-LABEL: @cmpswap(
%i = alloca i8
- %j = cmpxchg i8* %i, i8 0, i8 42 monotonic
+ %j = cmpxchg i8* %i, i8 0, i8 42 monotonic monotonic
; CHECK: [[INST:%[a-z0-9]+]] = load
; CHECK-NEXT: icmp
; CHECK-NEXT: select
diff --git a/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll b/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
index e9d93e834a..5ae62af545 100644
--- a/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
+++ b/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
@@ -65,7 +65,7 @@ define void @test5(i1 %C, i32* %P) {
entry:
br i1 %C, label %T, label %F
T:
- cmpxchg volatile i32* %P, i32 0, i32 1 seq_cst
+ cmpxchg volatile i32* %P, i32 0, i32 1 seq_cst seq_cst
unreachable
F:
ret void