Diffstat (limited to 'test')
-rw-r--r--  test/CodeGen/PowerPC/2010-03-09-indirect-call.ll  |   6
-rw-r--r--  test/CodeGen/Thumb2/thumb2-cmn.ll                 |  22
-rw-r--r--  test/CodeGen/Thumb2/thumb2-cmp.ll                 |  18
-rw-r--r--  test/CodeGen/Thumb2/thumb2-cmp2.ll                |  18
-rw-r--r--  test/CodeGen/Thumb2/thumb2-teq.ll                 |  16
-rw-r--r--  test/CodeGen/Thumb2/thumb2-teq2.ll                |  18
-rw-r--r--  test/CodeGen/Thumb2/thumb2-tst.ll                 |  16
-rw-r--r--  test/CodeGen/Thumb2/thumb2-tst2.ll                |  18
-rw-r--r--  test/CodeGen/X86/add.ll                           |  10
-rw-r--r--  test/CodeGen/X86/andimm8.ll                       |   2
-rw-r--r--  test/CodeGen/X86/avx-minmax.ll                    |   2
-rw-r--r--  test/CodeGen/X86/coalescer-commute2.ll            |   2
-rw-r--r--  test/CodeGen/X86/dbg-value-range.ll               |   1
-rw-r--r--  test/CodeGen/X86/phys_subreg_coalesce-3.ll        |   6
-rw-r--r--  test/CodeGen/X86/pmul.ll                          |   4
-rw-r--r--  test/CodeGen/X86/sse-minmax.ll                    | 285
-rw-r--r--  test/CodeGen/X86/win64_alloca_dynalloca.ll        |  15
17 files changed, 227 insertions, 232 deletions
diff --git a/test/CodeGen/PowerPC/2010-03-09-indirect-call.ll b/test/CodeGen/PowerPC/2010-03-09-indirect-call.ll
index 6b31397138..0003a17c22 100644
--- a/test/CodeGen/PowerPC/2010-03-09-indirect-call.ll
+++ b/test/CodeGen/PowerPC/2010-03-09-indirect-call.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5 -mtriple=powerpc-apple-darwin10.0 -join-physregs | FileCheck %s
+; RUN: llc < %s -march=ppc32 -mcpu=g5 -mtriple=powerpc-apple-darwin10.0 | FileCheck %s
; ModuleID = 'nn.c'
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
target triple = "powerpc-apple-darwin11.0"
@@ -9,7 +9,9 @@ target triple = "powerpc-apple-darwin11.0"
define void @foo() nounwind ssp {
entry:
-; CHECK: mtctr r12
+; Better: mtctr r12
+; CHECK: mr r12, [[REG:r[0-9]+]]
+; CHECK: mtctr [[REG]]
%0 = load void (...)** @p, align 4 ; <void (...)*> [#uses=1]
call void (...)* %0() nounwind
br label %return
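A note on the relaxed PowerPC check above: the function pointer is no longer expected to be joined into r12 before the mtctr, so the test captures whichever register holds it and verifies that r12 receives a copy and that the same register feeds mtctr. A minimal sketch of the FileCheck capture idiom (the r3 binding below is hypothetical, not output of this commit):

; CHECK: mr r12, [[REG:r[0-9]+]]   ; [[REG:...]] binds REG to the matched register, e.g. r3
; CHECK: mtctr [[REG]]             ; [[REG]] then requires that same register again
; So 'mr r12, r3' followed by 'mtctr r3' passes, while 'mtctr r4' after it would fail.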
diff --git a/test/CodeGen/Thumb2/thumb2-cmn.ll b/test/CodeGen/Thumb2/thumb2-cmn.ll
index 21bbd269ca..67b07e63fc 100644
--- a/test/CodeGen/Thumb2/thumb2-cmn.ll
+++ b/test/CodeGen/Thumb2/thumb2-cmn.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -join-physregs | FileCheck %s
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; These tests implicitly depend on 'movs r0, #0' being rematerialized below the
-; test as 'mov.w r0, #0'. So far, that requires physreg joining.
+; These tests could be improved by 'movs r0, #0' being rematerialized below the
+; test as 'mov.w r0, #0'.
define i1 @f1(i32 %a, i32 %b) {
%nb = sub i32 0, %b
@@ -9,7 +9,7 @@ define i1 @f1(i32 %a, i32 %b) {
ret i1 %tmp
}
; CHECK: f1:
-; CHECK: cmn r0, r1
+; CHECK: cmn {{.*}}, r1
define i1 @f2(i32 %a, i32 %b) {
%nb = sub i32 0, %b
@@ -17,7 +17,7 @@ define i1 @f2(i32 %a, i32 %b) {
ret i1 %tmp
}
; CHECK: f2:
-; CHECK: cmn r0, r1
+; CHECK: cmn {{.*}}, r1
define i1 @f3(i32 %a, i32 %b) {
%nb = sub i32 0, %b
@@ -25,7 +25,7 @@ define i1 @f3(i32 %a, i32 %b) {
ret i1 %tmp
}
; CHECK: f3:
-; CHECK: cmn r0, r1
+; CHECK: cmn {{.*}}, r1
define i1 @f4(i32 %a, i32 %b) {
%nb = sub i32 0, %b
@@ -33,7 +33,7 @@ define i1 @f4(i32 %a, i32 %b) {
ret i1 %tmp
}
; CHECK: f4:
-; CHECK: cmn r0, r1
+; CHECK: cmn {{.*}}, r1
define i1 @f5(i32 %a, i32 %b) {
%tmp = shl i32 %b, 5
@@ -42,7 +42,7 @@ define i1 @f5(i32 %a, i32 %b) {
ret i1 %tmp1
}
; CHECK: f5:
-; CHECK: cmn.w r0, r1, lsl #5
+; CHECK: cmn.w {{.*}}, r1, lsl #5
define i1 @f6(i32 %a, i32 %b) {
%tmp = lshr i32 %b, 6
@@ -51,7 +51,7 @@ define i1 @f6(i32 %a, i32 %b) {
ret i1 %tmp1
}
; CHECK: f6:
-; CHECK: cmn.w r0, r1, lsr #6
+; CHECK: cmn.w {{.*}}, r1, lsr #6
define i1 @f7(i32 %a, i32 %b) {
%tmp = ashr i32 %b, 7
@@ -60,7 +60,7 @@ define i1 @f7(i32 %a, i32 %b) {
ret i1 %tmp1
}
; CHECK: f7:
-; CHECK: cmn.w r0, r1, asr #7
+; CHECK: cmn.w {{.*}}, r1, asr #7
define i1 @f8(i32 %a, i32 %b) {
%l8 = shl i32 %a, 24
@@ -71,7 +71,7 @@ define i1 @f8(i32 %a, i32 %b) {
ret i1 %tmp1
}
; CHECK: f8:
-; CHECK: cmn.w r0, r0, ror #8
+; CHECK: cmn.w {{.*}}, {{.*}}, ror #8
define void @f9(i32 %a, i32 %b) nounwind optsize {
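The Thumb2 checks in this and the following files are relaxed the same way: the first operand of cmn/cmp/teq/tst is no longer pinned to r0; only the second operand and any shift are still matched literally. A small illustration of what the wildcard admits (register names below are assumptions, not output of this commit):

; CHECK: cmn {{.*}}, r1
;   matches 'cmn r0, r1'  (the old, pinned form)
;   matches 'cmn r2, r1'  (the input may now live in another register)
;   still rejects 'cmn r0, r3'  (wrong second operand)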
diff --git a/test/CodeGen/Thumb2/thumb2-cmp.ll b/test/CodeGen/Thumb2/thumb2-cmp.ll
index da121140be..4ce7acc22e 100644
--- a/test/CodeGen/Thumb2/thumb2-cmp.ll
+++ b/test/CodeGen/Thumb2/thumb2-cmp.ll
@@ -1,12 +1,12 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -join-physregs | FileCheck %s
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; These tests implicitly depend on 'movs r0, #0' being rematerialized below the
-; test as 'mov.w r0, #0'. So far, that requires physreg joining.
+; These tests would be improved by 'movs r0, #0' being rematerialized below the
+; test as 'mov.w r0, #0'.
; 0x000000bb = 187
define i1 @f1(i32 %a) {
; CHECK: f1:
-; CHECK: cmp r0, #187
+; CHECK: cmp {{.*}}, #187
%tmp = icmp ne i32 %a, 187
ret i1 %tmp
}
@@ -14,7 +14,7 @@ define i1 @f1(i32 %a) {
; 0x00aa00aa = 11141290
define i1 @f2(i32 %a) {
; CHECK: f2:
-; CHECK: cmp.w r0, #11141290
+; CHECK: cmp.w {{.*}}, #11141290
%tmp = icmp eq i32 %a, 11141290
ret i1 %tmp
}
@@ -22,7 +22,7 @@ define i1 @f2(i32 %a) {
; 0xcc00cc00 = 3422604288
define i1 @f3(i32 %a) {
; CHECK: f3:
-; CHECK: cmp.w r0, #-872363008
+; CHECK: cmp.w {{.*}}, #-872363008
%tmp = icmp ne i32 %a, 3422604288
ret i1 %tmp
}
@@ -30,7 +30,7 @@ define i1 @f3(i32 %a) {
; 0xdddddddd = 3722304989
define i1 @f4(i32 %a) {
; CHECK: f4:
-; CHECK: cmp.w r0, #-572662307
+; CHECK: cmp.w {{.*}}, #-572662307
%tmp = icmp ne i32 %a, 3722304989
ret i1 %tmp
}
@@ -38,7 +38,7 @@ define i1 @f4(i32 %a) {
; 0x00110000 = 1114112
define i1 @f5(i32 %a) {
; CHECK: f5:
-; CHECK: cmp.w r0, #1114112
+; CHECK: cmp.w {{.*}}, #1114112
%tmp = icmp eq i32 %a, 1114112
ret i1 %tmp
}
@@ -46,7 +46,7 @@ define i1 @f5(i32 %a) {
; Check that we don't do an invalid (a > b) --> !(a < b + 1) transform.
;
; CHECK: f6:
-; CHECK-NOT: cmp.w r0, #-2147483648
+; CHECK-NOT: cmp.w {{.*}}, #-2147483648
; CHECK: bx lr
define i32 @f6(i32 %a) {
%tmp = icmp sgt i32 %a, 2147483647
diff --git a/test/CodeGen/Thumb2/thumb2-cmp2.ll b/test/CodeGen/Thumb2/thumb2-cmp2.ll
index 15052e0067..f6790deb1f 100644
--- a/test/CodeGen/Thumb2/thumb2-cmp2.ll
+++ b/test/CodeGen/Thumb2/thumb2-cmp2.ll
@@ -1,25 +1,25 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -join-physregs | FileCheck %s
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; These tests implicitly depend on 'movs r0, #0' being rematerialized below the
-; test as 'mov.w r0, #0'. So far, that requires physreg joining.
+; These tests would be improved by 'movs r0, #0' being rematerialized below the
+; test as 'mov.w r0, #0'.
define i1 @f1(i32 %a, i32 %b) {
; CHECK: f1:
-; CHECK: cmp r0, r1
+; CHECK: cmp {{.*}}, r1
%tmp = icmp ne i32 %a, %b
ret i1 %tmp
}
define i1 @f2(i32 %a, i32 %b) {
; CHECK: f2:
-; CHECK: cmp r0, r1
+; CHECK: cmp {{.*}}, r1
%tmp = icmp eq i32 %a, %b
ret i1 %tmp
}
define i1 @f6(i32 %a, i32 %b) {
; CHECK: f6:
-; CHECK: cmp.w r0, r1, lsl #5
+; CHECK: cmp.w {{.*}}, r1, lsl #5
%tmp = shl i32 %b, 5
%tmp1 = icmp eq i32 %tmp, %a
ret i1 %tmp1
@@ -27,7 +27,7 @@ define i1 @f6(i32 %a, i32 %b) {
define i1 @f7(i32 %a, i32 %b) {
; CHECK: f7:
-; CHECK: cmp.w r0, r1, lsr #6
+; CHECK: cmp.w {{.*}}, r1, lsr #6
%tmp = lshr i32 %b, 6
%tmp1 = icmp ne i32 %tmp, %a
ret i1 %tmp1
@@ -35,7 +35,7 @@ define i1 @f7(i32 %a, i32 %b) {
define i1 @f8(i32 %a, i32 %b) {
; CHECK: f8:
-; CHECK: cmp.w r0, r1, asr #7
+; CHECK: cmp.w {{.*}}, r1, asr #7
%tmp = ashr i32 %b, 7
%tmp1 = icmp eq i32 %a, %tmp
ret i1 %tmp1
@@ -43,7 +43,7 @@ define i1 @f8(i32 %a, i32 %b) {
define i1 @f9(i32 %a, i32 %b) {
; CHECK: f9:
-; CHECK: cmp.w r0, r0, ror #8
+; CHECK: cmp.w {{.*}}, {{.*}}, ror #8
%l8 = shl i32 %a, 24
%r8 = lshr i32 %a, 8
%tmp = or i32 %l8, %r8
diff --git a/test/CodeGen/Thumb2/thumb2-teq.ll b/test/CodeGen/Thumb2/thumb2-teq.ll
index 00c928fc07..d453f469ab 100644
--- a/test/CodeGen/Thumb2/thumb2-teq.ll
+++ b/test/CodeGen/Thumb2/thumb2-teq.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -join-physregs | FileCheck %s
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; These tests implicitly depend on 'movs r0, #0' being rematerialized below the
-; test as 'mov.w r0, #0'. So far, that requires physreg joining.
+; These tests would be improved by 'movs r0, #0' being rematerialized below the
+; test as 'mov.w r0, #0'.
; 0x000000bb = 187
define i1 @f2(i32 %a) {
@@ -10,7 +10,7 @@ define i1 @f2(i32 %a) {
ret i1 %tmp1
}
; CHECK: f2:
-; CHECK: teq.w r0, #187
+; CHECK: teq.w {{.*}}, #187
; 0x00aa00aa = 11141290
define i1 @f3(i32 %a) {
@@ -19,7 +19,7 @@ define i1 @f3(i32 %a) {
ret i1 %tmp1
}
; CHECK: f3:
-; CHECK: teq.w r0, #11141290
+; CHECK: teq.w {{.*}}, #11141290
; 0xcc00cc00 = 3422604288
define i1 @f6(i32 %a) {
@@ -28,7 +28,7 @@ define i1 @f6(i32 %a) {
ret i1 %tmp1
}
; CHECK: f6:
-; CHECK: teq.w r0, #-872363008
+; CHECK: teq.w {{.*}}, #-872363008
; 0xdddddddd = 3722304989
define i1 @f7(i32 %a) {
@@ -37,7 +37,7 @@ define i1 @f7(i32 %a) {
ret i1 %tmp1
}
; CHECK: f7:
-; CHECK: teq.w r0, #-572662307
+; CHECK: teq.w {{.*}}, #-572662307
; 0xdddddddd = 3722304989
define i1 @f8(i32 %a) {
@@ -53,5 +53,5 @@ define i1 @f10(i32 %a) {
ret i1 %tmp1
}
; CHECK: f10:
-; CHECK: teq.w r0, #1114112
+; CHECK: teq.w {{.*}}, #1114112
diff --git a/test/CodeGen/Thumb2/thumb2-teq2.ll b/test/CodeGen/Thumb2/thumb2-teq2.ll
index 8acae9090f..27ecad8393 100644
--- a/test/CodeGen/Thumb2/thumb2-teq2.ll
+++ b/test/CodeGen/Thumb2/thumb2-teq2.ll
@@ -1,11 +1,11 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -join-physregs | FileCheck %s
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; These tests implicitly depend on 'movs r0, #0' being rematerialized below the
-; tst as 'mov.w r0, #0'. So far, that requires physreg joining.
+; These tests would be improved by 'movs r0, #0' being rematerialized below the
+; tst as 'mov.w r0, #0'.
define i1 @f2(i32 %a, i32 %b) {
; CHECK: f2
-; CHECK: teq.w r0, r1
+; CHECK: teq.w {{.*}}, r1
%tmp = xor i32 %a, %b
%tmp1 = icmp eq i32 %tmp, 0
ret i1 %tmp1
@@ -13,7 +13,7 @@ define i1 @f2(i32 %a, i32 %b) {
define i1 @f4(i32 %a, i32 %b) {
; CHECK: f4
-; CHECK: teq.w r0, r1
+; CHECK: teq.w {{.*}}, r1
%tmp = xor i32 %a, %b
%tmp1 = icmp eq i32 0, %tmp
ret i1 %tmp1
@@ -21,7 +21,7 @@ define i1 @f4(i32 %a, i32 %b) {
define i1 @f6(i32 %a, i32 %b) {
; CHECK: f6
-; CHECK: teq.w r0, r1, lsl #5
+; CHECK: teq.w {{.*}}, r1, lsl #5
%tmp = shl i32 %b, 5
%tmp1 = xor i32 %a, %tmp
%tmp2 = icmp eq i32 %tmp1, 0
@@ -30,7 +30,7 @@ define i1 @f6(i32 %a, i32 %b) {
define i1 @f7(i32 %a, i32 %b) {
; CHECK: f7
-; CHECK: teq.w r0, r1, lsr #6
+; CHECK: teq.w {{.*}}, r1, lsr #6
%tmp = lshr i32 %b, 6
%tmp1 = xor i32 %a, %tmp
%tmp2 = icmp eq i32 %tmp1, 0
@@ -39,7 +39,7 @@ define i1 @f7(i32 %a, i32 %b) {
define i1 @f8(i32 %a, i32 %b) {
; CHECK: f8
-; CHECK: teq.w r0, r1, asr #7
+; CHECK: teq.w {{.*}}, r1, asr #7
%tmp = ashr i32 %b, 7
%tmp1 = xor i32 %a, %tmp
%tmp2 = icmp eq i32 %tmp1, 0
@@ -48,7 +48,7 @@ define i1 @f8(i32 %a, i32 %b) {
define i1 @f9(i32 %a, i32 %b) {
; CHECK: f9
-; CHECK: teq.w r0, r0, ror #8
+; CHECK: teq.w {{.*}}, {{.*}}, ror #8
%l8 = shl i32 %a, 24
%r8 = lshr i32 %a, 8
%tmp = or i32 %l8, %r8
diff --git a/test/CodeGen/Thumb2/thumb2-tst.ll b/test/CodeGen/Thumb2/thumb2-tst.ll
index 43e208cc59..67fe82ee52 100644
--- a/test/CodeGen/Thumb2/thumb2-tst.ll
+++ b/test/CodeGen/Thumb2/thumb2-tst.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -join-physregs | FileCheck %s
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; These tests implicitly depend on 'movs r0, #0' being rematerialized below the
-; tst as 'mov.w r0, #0'. So far, that requires physreg joining.
+; These tests would be improved by 'movs r0, #0' being rematerialized below the
+; tst as 'mov.w r0, #0'.
; 0x000000bb = 187
define i1 @f2(i32 %a) {
@@ -10,7 +10,7 @@ define i1 @f2(i32 %a) {
ret i1 %tmp1
}
; CHECK: f2:
-; CHECK: tst.w r0, #187
+; CHECK: tst.w {{.*}}, #187
; 0x00aa00aa = 11141290
define i1 @f3(i32 %a) {
@@ -19,7 +19,7 @@ define i1 @f3(i32 %a) {
ret i1 %tmp1
}
; CHECK: f3:
-; CHECK: tst.w r0, #11141290
+; CHECK: tst.w {{.*}}, #11141290
; 0xcc00cc00 = 3422604288
define i1 @f6(i32 %a) {
@@ -28,7 +28,7 @@ define i1 @f6(i32 %a) {
ret i1 %tmp1
}
; CHECK: f6:
-; CHECK: tst.w r0, #-872363008
+; CHECK: tst.w {{.*}}, #-872363008
; 0xdddddddd = 3722304989
define i1 @f7(i32 %a) {
@@ -37,7 +37,7 @@ define i1 @f7(i32 %a) {
ret i1 %tmp1
}
; CHECK: f7:
-; CHECK: tst.w r0, #-572662307
+; CHECK: tst.w {{.*}}, #-572662307
; 0x00110000 = 1114112
define i1 @f10(i32 %a) {
@@ -46,4 +46,4 @@ define i1 @f10(i32 %a) {
ret i1 %tmp1
}
; CHECK: f10:
-; CHECK: tst.w r0, #1114112
+; CHECK: tst.w {{.*}}, #1114112
diff --git a/test/CodeGen/Thumb2/thumb2-tst2.ll b/test/CodeGen/Thumb2/thumb2-tst2.ll
index bfe016fc8d..e3fe792005 100644
--- a/test/CodeGen/Thumb2/thumb2-tst2.ll
+++ b/test/CodeGen/Thumb2/thumb2-tst2.ll
@@ -1,11 +1,11 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -join-physregs | FileCheck %s
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; These tests implicitly depend on 'movs r0, #0' being rematerialized below the
-; tst as 'mov.w r0, #0'. So far, that requires physreg joining.
+; These tests would be improved by 'movs r0, #0' being rematerialized below the
+; tst as 'mov.w r0, #0'.
define i1 @f2(i32 %a, i32 %b) {
; CHECK: f2:
-; CHECK: tst r0, r1
+; CHECK: tst {{.*}}, r1
%tmp = and i32 %a, %b
%tmp1 = icmp eq i32 %tmp, 0
ret i1 %tmp1
@@ -13,7 +13,7 @@ define i1 @f2(i32 %a, i32 %b) {
define i1 @f4(i32 %a, i32 %b) {
; CHECK: f4:
-; CHECK: tst r0, r1
+; CHECK: tst {{.*}}, r1
%tmp = and i32 %a, %b
%tmp1 = icmp eq i32 0, %tmp
ret i1 %tmp1
@@ -21,7 +21,7 @@ define i1 @f4(i32 %a, i32 %b) {
define i1 @f6(i32 %a, i32 %b) {
; CHECK: f6:
-; CHECK: tst.w r0, r1, lsl #5
+; CHECK: tst.w {{.*}}, r1, lsl #5
%tmp = shl i32 %b, 5
%tmp1 = and i32 %a, %tmp
%tmp2 = icmp eq i32 %tmp1, 0
@@ -30,7 +30,7 @@ define i1 @f6(i32 %a, i32 %b) {
define i1 @f7(i32 %a, i32 %b) {
; CHECK: f7:
-; CHECK: tst.w r0, r1, lsr #6
+; CHECK: tst.w {{.*}}, r1, lsr #6
%tmp = lshr i32 %b, 6
%tmp1 = and i32 %a, %tmp
%tmp2 = icmp eq i32 %tmp1, 0
@@ -39,7 +39,7 @@ define i1 @f7(i32 %a, i32 %b) {
define i1 @f8(i32 %a, i32 %b) {
; CHECK: f8:
-; CHECK: tst.w r0, r1, asr #7
+; CHECK: tst.w {{.*}}, r1, asr #7
%tmp = ashr i32 %b, 7
%tmp1 = and i32 %a, %tmp
%tmp2 = icmp eq i32 %tmp1, 0
@@ -48,7 +48,7 @@ define i1 @f8(i32 %a, i32 %b) {
define i1 @f9(i32 %a, i32 %b) {
; CHECK: f9:
-; CHECK: tst.w r0, r0, ror #8
+; CHECK: tst.w {{.*}}, {{.*}}, ror #8
%l8 = shl i32 %a, 24
%r8 = lshr i32 %a, 8
%tmp = or i32 %l8, %r8
diff --git a/test/CodeGen/X86/add.ll b/test/CodeGen/X86/add.ll
index 8e871f4aeb..03d2e472cb 100644
--- a/test/CodeGen/X86/add.ll
+++ b/test/CodeGen/X86/add.ll
@@ -1,8 +1,6 @@
; RUN: llc < %s -mcpu=generic -march=x86 | FileCheck %s -check-prefix=X32
-; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux -join-physregs | FileCheck %s -check-prefix=X64
-; RUN: llc < %s -mcpu=generic -mtriple=x86_64-win32 -join-physregs | FileCheck %s -check-prefix=X64
-
-; Some of these tests depend on -join-physregs to commute instructions.
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-win32 | FileCheck %s -check-prefix=X64
; The immediate can be encoded in a smaller way if the
; instruction is a sub instead of an add.
@@ -101,9 +99,9 @@ define {i32, i1} @test7(i32 %v1, i32 %v2) nounwind {
}
; X64: test7:
-; X64: addl %e[[A1]], %eax
+; X64: addl %e[[A1]], %e
; X64-NEXT: setb %dl
-; X64-NEXT: ret
+; X64: ret
; PR5443
define {i64, i1} @test8(i64 %left, i64 %right) nounwind {
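For test7 just above, dropping -join-physregs means the flag-producing add no longer necessarily targets %eax, so the check keeps the captured source register but lets the destination be any 32-bit GPR, and 'ret' is no longer required to be the very next instruction. A hypothetical sequence that still satisfies the relaxed checks (register choices assumed, with [[A1]] bound to 'si' earlier in the file):

; X64: addl %e[[A1]], %e
; X64-NEXT: setb %dl
; X64: ret
;
;   addl %esi, %ecx     ; sum lands in %ecx instead of %eax
;   setb %dl            ; carry flag -> second result
;   movl %ecx, %eax     ; copy into the return register
;   ret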
diff --git a/test/CodeGen/X86/andimm8.ll b/test/CodeGen/X86/andimm8.ll
index a3dc85ff5c..640237d0b5 100644
--- a/test/CodeGen/X86/andimm8.ll
+++ b/test/CodeGen/X86/andimm8.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-pc-linux-gnu -show-mc-encoding -join-physregs | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-pc-linux-gnu -show-mc-encoding | FileCheck %s
; PR8365
; CHECK: andl $-64, %edi # encoding: [0x83,0xe7,0xc0]
diff --git a/test/CodeGen/X86/avx-minmax.ll b/test/CodeGen/X86/avx-minmax.ll
index 7c58820109..737fa6e416 100644
--- a/test/CodeGen/X86/avx-minmax.ll
+++ b/test/CodeGen/X86/avx-minmax.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86-64 -mattr=+avx -asm-verbose=false -join-physregs -enable-unsafe-fp-math -enable-no-nans-fp-math -promote-elements | FileCheck -check-prefix=UNSAFE %s
+; RUN: llc < %s -march=x86-64 -mattr=+avx -asm-verbose=false -enable-unsafe-fp-math -enable-no-nans-fp-math -promote-elements | FileCheck -check-prefix=UNSAFE %s
; UNSAFE: maxpd:
; UNSAFE: vmaxpd {{.+}}, %xmm
diff --git a/test/CodeGen/X86/coalescer-commute2.ll b/test/CodeGen/X86/coalescer-commute2.ll
index 6e5c1cf630..730692093d 100644
--- a/test/CodeGen/X86/coalescer-commute2.ll
+++ b/test/CodeGen/X86/coalescer-commute2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-linux -join-physregs | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
; CHECK-NOT: mov
; CHECK: paddw
; CHECK-NOT: mov
diff --git a/test/CodeGen/X86/dbg-value-range.ll b/test/CodeGen/X86/dbg-value-range.ll
index 28d873bfba..6b16865ba9 100644
--- a/test/CodeGen/X86/dbg-value-range.ll
+++ b/test/CodeGen/X86/dbg-value-range.ll
@@ -1,5 +1,4 @@
; RUN: llc -mtriple=x86_64-apple-darwin10 < %s | FileCheck %s
-; RUN: llc -mtriple=x86_64-apple-darwin10 -regalloc=basic -join-physregs < %s | FileCheck %s
%struct.a = type { i32 }
diff --git a/test/CodeGen/X86/phys_subreg_coalesce-3.ll b/test/CodeGen/X86/phys_subreg_coalesce-3.ll
index 4162015ea8..984d7e57e0 100644
--- a/test/CodeGen/X86/phys_subreg_coalesce-3.ll
+++ b/test/CodeGen/X86/phys_subreg_coalesce-3.ll
@@ -1,10 +1,14 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -join-physregs | FileCheck %s
+; RUN: llc < %s -mtriple=i386-apple-darwin | FileCheck %s
+; XFAIL: *
; rdar://5571034
; This requires physreg joining, %vreg13 is live everywhere:
; 304L %CL<def> = COPY %vreg13:sub_8bit; GR32_ABCD:%vreg13
; 320L %vreg15<def> = COPY %vreg19; GR32:%vreg15 GR32_NOSP:%vreg19
; 336L %vreg15<def> = SAR32rCL %vreg15, %EFLAGS<imp-def,dead>, %CL<imp-use,kill>; GR32:%vreg15
+;
+; This test is XFAIL until the register allocator understands trivial physreg
+; interference. <rdar://9802098>
define void @foo(i32* nocapture %quadrant, i32* nocapture %ptr, i32 %bbSize, i32 %bbStart, i32 %shifts) nounwind ssp {
; CHECK: foo:
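The XFAIL above comes from the x86 rule that a variable shift count must be in %cl: the machine-IR dump shows %vreg13 being copied into %CL right before SAR32rCL. A stand-alone sketch of that constraint (separate from this test; illustrative IR and assembly):

define i32 @sar(i32 %x, i32 %amt) nounwind {
  %r = ashr i32 %x, %amt     ; variable shift: the count has to reach %cl
  ret i32 %r
}
; Typical x86-64 output:
;   movl %esi, %ecx          ; shift count into %cl (low byte of %ecx)
;   sarl %cl, %edi
;   movl %edi, %eax
;   ret
; The original test expected physreg joining to coalesce such copies away; it is
; XFAIL until the allocator understands this kind of trivial physreg interference.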
diff --git a/test/CodeGen/X86/pmul.ll b/test/CodeGen/X86/pmul.ll
index 024ce3c761..da4af81959 100644
--- a/test/CodeGen/X86/pmul.ll
+++ b/test/CodeGen/X86/pmul.ll
@@ -1,9 +1,7 @@
-; RUN: llc < %s -march=x86 -mattr=sse41 -mcpu=nehalem -stack-alignment=16 -join-physregs > %t
+; RUN: llc < %s -march=x86 -mattr=sse41 -mcpu=nehalem -stack-alignment=16 > %t
; RUN: grep pmul %t | count 12
; RUN: grep mov %t | count 11
-; The f() arguments in %xmm0 and %xmm1 cause an extra movdqa without -join-physregs.
-
define <4 x i32> @a(<4 x i32> %i) nounwind {
%A = mul <4 x i32> %i, < i32 117, i32 117, i32 117, i32 117 >
ret <4 x i32> %A
diff --git a/test/CodeGen/X86/sse-minmax.ll b/test/CodeGen/X86/sse-minmax.ll
index 903b54ea6e..ee7aa06426 100644
--- a/test/CodeGen/X86/sse-minmax.ll
+++ b/test/CodeGen/X86/sse-minmax.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=x86-64 -mcpu=nehalem -asm-verbose=false -join-physregs -promote-elements | FileCheck %s
-; RUN: llc < %s -march=x86-64 -mcpu=nehalem -asm-verbose=false -join-physregs -enable-unsafe-fp-math -enable-no-nans-fp-math -promote-elements | FileCheck -check-prefix=UNSAFE %s
-; RUN: llc < %s -march=x86-64 -mcpu=nehalem -asm-verbose=false -join-physregs -enable-no-nans-fp-math -promote-elements | FileCheck -check-prefix=FINITE %s
+; RUN: llc < %s -march=x86-64 -mcpu=nehalem -asm-verbose=false -promote-elements | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=nehalem -asm-verbose=false -enable-unsafe-fp-math -enable-no-nans-fp-math -promote-elements | FileCheck -check-prefix=UNSAFE %s
+; RUN: llc < %s -march=x86-64 -mcpu=nehalem -asm-verbose=false -enable-no-nans-fp-math -promote-elements | FileCheck -check-prefix=FINITE %s
; Some of these patterns can be matched as SSE min or max. Some of
; then can be matched provided that the operands are swapped.
@@ -8,13 +8,10 @@
; and a conditional branch.
; The naming convention is {,x_,y_}{o,u}{gt,lt,ge,le}{,_inverse}
-; x_ : use 0.0 instead of %y
-; y_ : use -0.0 instead of %y
+; _x: use 0.0 instead of %y
+; _y: use -0.0 instead of %y
; _inverse : swap the arms of the select.
-; Some of these tests depend on -join-physregs commuting instructions to
-; eliminate copies.
-
; CHECK: ogt:
; CHECK-NEXT: maxsd %xmm1, %xmm0
; CHECK-NEXT: ret
@@ -139,147 +136,147 @@ define double @ole_inverse(double %x, double %y) nounwind {
ret double %d
}
-; CHECK: x_ogt:
+; CHECK: ogt_x:
; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; CHECK-NEXT: maxsd %xmm1, %xmm0
; CHECK-NEXT: ret
-; UNSAFE: x_ogt:
+; UNSAFE: ogt_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; UNSAFE-NEXT: maxsd %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: x_ogt:
+; FINITE: ogt_x:
; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; FINITE-NEXT: maxsd %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @x_ogt(double %x) nounwind {
+define double @ogt_x(double %x) nounwind {
%c = fcmp ogt double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
-; CHECK: x_olt:
+; CHECK: olt_x:
; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; CHECK-NEXT: minsd %xmm1, %xmm0
; CHECK-NEXT: ret
-; UNSAFE: x_olt:
+; UNSAFE: olt_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; UNSAFE-NEXT: minsd %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: x_olt:
+; FINITE: olt_x:
; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; FINITE-NEXT: minsd %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @x_olt(double %x) nounwind {
+define double @olt_x(double %x) nounwind {
%c = fcmp olt double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
-; CHECK: x_ogt_inverse:
+; CHECK: ogt_inverse_x:
; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; CHECK-NEXT: minsd %xmm0, %xmm1
; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
; CHECK-NEXT: ret
-; UNSAFE: x_ogt_inverse:
+; UNSAFE: ogt_inverse_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; UNSAFE-NEXT: minsd %xmm0, %xmm1
; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: x_ogt_inverse:
+; FINITE: ogt_inverse_x:
; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @x_ogt_inverse(double %x) nounwind {
+define double @ogt_inverse_x(double %x) nounwind {
%c = fcmp ogt double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
-; CHECK: x_olt_inverse:
+; CHECK: olt_inverse_x:
; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; CHECK-NEXT: maxsd %xmm0, %xmm1
; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
; CHECK-NEXT: ret
-; UNSAFE: x_olt_inverse:
+; UNSAFE: olt_inverse_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; UNSAFE-NEXT: maxsd %xmm0, %xmm1
; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: x_olt_inverse:
+; FINITE: olt_inverse_x:
; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @x_olt_inverse(double %x) nounwind {
+define double @olt_inverse_x(double %x) nounwind {
%c = fcmp olt double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
-; CHECK: x_oge:
+; CHECK: oge_x:
; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: x_oge:
+; UNSAFE: oge_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; UNSAFE-NEXT: maxsd %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: x_oge:
+; FINITE: oge_x:
; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; FINITE-NEXT: maxsd %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @x_oge(double %x) nounwind {
+define double @oge_x(double %x) nounwind {
%c = fcmp oge double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
-; CHECK: x_ole:
+; CHECK: ole_x:
; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: x_ole:
+; UNSAFE: ole_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; UNSAFE-NEXT: minsd %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: x_ole:
+; FINITE: ole_x:
; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; FINITE-NEXT: minsd %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @x_ole(double %x) nounwind {
+define double @ole_x(double %x) nounwind {
%c = fcmp ole double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
-; CHECK: x_oge_inverse:
-; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: x_oge_inverse:
+; CHECK: oge_inverse_x:
+; CHECK: ucomisd %xmm
+; UNSAFE: oge_inverse_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; UNSAFE-NEXT: minsd %xmm0, %xmm1
; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: x_oge_inverse:
+; FINITE: oge_inverse_x:
; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @x_oge_inverse(double %x) nounwind {
+define double @oge_inverse_x(double %x) nounwind {
%c = fcmp oge double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
-; CHECK: x_ole_inverse:
-; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: x_ole_inverse:
+; CHECK: ole_inverse_x:
+; CHECK: ucomisd %xmm
+; UNSAFE: ole_inverse_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; UNSAFE-NEXT: maxsd %xmm0, %xmm1
; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: x_ole_inverse:
+; FINITE: ole_inverse_x:
; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @x_ole_inverse(double %x) nounwind {
+define double @ole_inverse_x(double %x) nounwind {
%c = fcmp ole double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
@@ -411,419 +408,419 @@ define double @ule_inverse(double %x, double %y) nounwind {
ret double %d
}
-; CHECK: x_ugt:
+; CHECK: ugt_x:
; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: x_ugt:
+; UNSAFE: ugt_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; UNSAFE-NEXT: maxsd %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: x_ugt:
+; FINITE: ugt_x:
; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; FINITE-NEXT: maxsd %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @x_ugt(double %x) nounwind {
+define double @ugt_x(double %x) nounwind {
%c = fcmp ugt double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
-; CHECK: x_ult:
+; CHECK: ult_x:
; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: x_ult:
+; UNSAFE: ult_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; UNSAFE-NEXT: minsd %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: x_ult:
+; FINITE: ult_x:
; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; FINITE-NEXT: minsd %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @x_ult(double %x) nounwind {
+define double @ult_x(double %x) nounwind {
%c = fcmp ult double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
-; CHECK: x_ugt_inverse:
-; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: x_ugt_inverse:
+; CHECK: ugt_inverse_x:
+; CHECK: ucomisd %xmm
+; UNSAFE: ugt_inverse_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; UNSAFE-NEXT: minsd %xmm0, %xmm1
; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: x_ugt_inverse:
+; FINITE: ugt_inverse_x:
; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @x_ugt_inverse(double %x) nounwind {
+define double @ugt_inverse_x(double %x) nounwind {
%c = fcmp ugt double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
-; CHECK: x_ult_inverse:
-; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: x_ult_inverse:
+; CHECK: ult_inverse_x:
+; CHECK: ucomisd %xmm
+; UNSAFE: ult_inverse_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; UNSAFE-NEXT: maxsd %xmm0, %xmm1
; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: x_ult_inverse:
+; FINITE: ult_inverse_x:
; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @x_ult_inverse(double %x) nounwind {
+define double @ult_inverse_x(double %x) nounwind {
%c = fcmp ult double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
-; CHECK: x_uge:
+; CHECK: uge_x:
; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; CHECK-NEXT: maxsd %xmm0, %xmm1
; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
; CHECK-NEXT: ret
-; UNSAFE: x_uge:
+; UNSAFE: uge_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; UNSAFE-NEXT: maxsd %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: x_uge:
+; FINITE: uge_x:
; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; FINITE-NEXT: maxsd %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @x_uge(double %x) nounwind {
+define double @uge_x(double %x) nounwind {
%c = fcmp uge double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
-; CHECK: x_ule:
+; CHECK: ule_x:
; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; CHECK-NEXT: minsd %xmm0, %xmm1
; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
; CHECK-NEXT: ret
-; UNSAFE: x_ule:
+; UNSAFE: ule_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; UNSAFE-NEXT: minsd %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: x_ule:
+; FINITE: ule_x:
; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; FINITE-NEXT: minsd %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @x_ule(double %x) nounwind {
+define double @ule_x(double %x) nounwind {
%c = fcmp ule double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
-; CHECK: x_uge_inverse:
+; CHECK: uge_inverse_x:
; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; CHECK-NEXT: minsd %xmm1, %xmm0
; CHECK-NEXT: ret
-; UNSAFE: x_uge_inverse:
+; UNSAFE: uge_inverse_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; UNSAFE-NEXT: minsd %xmm0, %xmm1
; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: x_uge_inverse:
+; FINITE: uge_inverse_x:
; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @x_uge_inverse(double %x) nounwind {
+define double @uge_inverse_x(double %x) nounwind {
%c = fcmp uge double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
-; CHECK: x_ule_inverse:
+; CHECK: ule_inverse_x:
; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; CHECK-NEXT: maxsd %xmm1, %xmm0
; CHECK-NEXT: ret
-; UNSAFE: x_ule_inverse:
+; UNSAFE: ule_inverse_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; UNSAFE-NEXT: maxsd %xmm0, %xmm1
; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: x_ule_inverse:
+; FINITE: ule_inverse_x:
; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @x_ule_inverse(double %x) nounwind {
+define double @ule_inverse_x(double %x) nounwind {
%c = fcmp ule double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
-; CHECK: y_ogt:
+; CHECK: ogt_y:
; CHECK-NEXT: maxsd {{[^,]*}}, %xmm0
; CHECK-NEXT: ret
-; UNSAFE: y_ogt:
+; UNSAFE: ogt_y:
; UNSAFE-NEXT: maxsd {{[^,]*}}, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: y_ogt:
+; FINITE: ogt_y:
; FINITE-NEXT: maxsd {{[^,]*}}, %xmm0
; FINITE-NEXT: ret
-define double @y_ogt(double %x) nounwind {
+define double @ogt_y(double %x) nounwind {
%c = fcmp ogt double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
-; CHECK: y_olt:
+; CHECK: olt_y:
; CHECK-NEXT: minsd {{[^,]*}}, %xmm0
; CHECK-NEXT: ret
-; UNSAFE: y_olt:
+; UNSAFE: olt_y:
; UNSAFE-NEXT: minsd {{[^,]*}}, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: y_olt:
+; FINITE: olt_y:
; FINITE-NEXT: minsd {{[^,]*}}, %xmm0
; FINITE-NEXT: ret
-define double @y_olt(double %x) nounwind {
+define double @olt_y(double %x) nounwind {
%c = fcmp olt double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
-; CHECK: y_ogt_inverse:
+; CHECK: ogt_inverse_y:
; CHECK-NEXT: movsd {{[^,]*}}, %xmm1
; CHECK-NEXT: minsd %xmm0, %xmm1
; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
; CHECK-NEXT: ret
-; UNSAFE: y_ogt_inverse:
+; UNSAFE: ogt_inverse_y:
; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
; UNSAFE-NEXT: minsd %xmm0, %xmm1
; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: y_ogt_inverse:
+; FINITE: ogt_inverse_y:
; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @y_ogt_inverse(double %x) nounwind {
+define double @ogt_inverse_y(double %x) nounwind {
%c = fcmp ogt double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
-; CHECK: y_olt_inverse:
+; CHECK: olt_inverse_y:
; CHECK-NEXT: movsd {{[^,]*}}, %xmm1
; CHECK-NEXT: maxsd %xmm0, %xmm1
; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
; CHECK-NEXT: ret
-; UNSAFE: y_olt_inverse:
+; UNSAFE: olt_inverse_y:
; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
; UNSAFE-NEXT: maxsd %xmm0, %xmm1
; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: y_olt_inverse:
+; FINITE: olt_inverse_y:
; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @y_olt_inverse(double %x) nounwind {
+define double @olt_inverse_y(double %x) nounwind {
%c = fcmp olt double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
-; CHECK: y_oge:
+; CHECK: oge_y:
; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: y_oge:
+; UNSAFE: oge_y:
; UNSAFE-NEXT: maxsd {{[^,]*}}, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: y_oge:
+; FINITE: oge_y:
; FINITE-NEXT: maxsd {{[^,]*}}, %xmm0
; FINITE-NEXT: ret
-define double @y_oge(double %x) nounwind {
+define double @oge_y(double %x) nounwind {
%c = fcmp oge double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
-; CHECK: y_ole:
+; CHECK: ole_y:
; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: y_ole:
+; UNSAFE: ole_y:
; UNSAFE-NEXT: minsd {{[^,]*}}, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: y_ole:
+; FINITE: ole_y:
; FINITE-NEXT: minsd {{[^,]*}}, %xmm0
; FINITE-NEXT: ret
-define double @y_ole(double %x) nounwind {
+define double @ole_y(double %x) nounwind {
%c = fcmp ole double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
-; CHECK: y_oge_inverse:
-; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: y_oge_inverse:
+; CHECK: oge_inverse_y:
+; CHECK: ucomisd %xmm
+; UNSAFE: oge_inverse_y:
; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
; UNSAFE-NEXT: minsd %xmm0, %xmm1
; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: y_oge_inverse:
+; FINITE: oge_inverse_y:
; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @y_oge_inverse(double %x) nounwind {
+define double @oge_inverse_y(double %x) nounwind {
%c = fcmp oge double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
-; CHECK: y_ole_inverse:
-; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: y_ole_inverse:
+; CHECK: ole_inverse_y:
+; CHECK: ucomisd %xmm
+; UNSAFE: ole_inverse_y:
; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
; UNSAFE-NEXT: maxsd %xmm0, %xmm1
; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: y_ole_inverse:
+; FINITE: ole_inverse_y:
; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @y_ole_inverse(double %x) nounwind {
+define double @ole_inverse_y(double %x) nounwind {
%c = fcmp ole double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
-; CHECK: y_ugt:
+; CHECK: ugt_y:
; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: y_ugt:
+; UNSAFE: ugt_y:
; UNSAFE-NEXT: maxsd {{[^,]*}}, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: y_ugt:
+; FINITE: ugt_y:
; FINITE-NEXT: maxsd {{[^,]*}}, %xmm0
; FINITE-NEXT: ret
-define double @y_ugt(double %x) nounwind {
+define double @ugt_y(double %x) nounwind {
%c = fcmp ugt double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
-; CHECK: y_ult:
+; CHECK: ult_y:
; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: y_ult:
+; UNSAFE: ult_y:
; UNSAFE-NEXT: minsd {{[^,]*}}, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: y_ult:
+; FINITE: ult_y:
; FINITE-NEXT: minsd {{[^,]*}}, %xmm0
; FINITE-NEXT: ret
-define double @y_ult(double %x) nounwind {
+define double @ult_y(double %x) nounwind {
%c = fcmp ult double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
-; CHECK: y_ugt_inverse:
-; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: y_ugt_inverse:
+; CHECK: ugt_inverse_y:
+; CHECK: ucomisd %xmm
+; UNSAFE: ugt_inverse_y:
; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
; UNSAFE-NEXT: minsd %xmm0, %xmm1
; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: y_ugt_inverse:
+; FINITE: ugt_inverse_y:
; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @y_ugt_inverse(double %x) nounwind {
+define double @ugt_inverse_y(double %x) nounwind {
%c = fcmp ugt double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
-; CHECK: y_ult_inverse:
-; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: y_ult_inverse:
+; CHECK: ult_inverse_y:
+; CHECK: ucomisd %xmm
+; UNSAFE: ult_inverse_y:
; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
; UNSAFE-NEXT: maxsd %xmm0, %xmm1
; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: y_ult_inverse:
+; FINITE: ult_inverse_y:
; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @y_ult_inverse(double %x) nounwind {
+define double @ult_inverse_y(double %x) nounwind {
%c = fcmp ult double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
-; CHECK: y_uge:
+; CHECK: uge_y:
; CHECK-NEXT: movsd {{[^,]*}}, %xmm1
; CHECK-NEXT: maxsd %xmm0, %xmm1
; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
; CHECK-NEXT: ret
-; UNSAFE: y_uge:
+; UNSAFE: uge_y:
; UNSAFE-NEXT: maxsd {{[^,]*}}, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: y_uge:
+; FINITE: uge_y:
; FINITE-NEXT: maxsd {{[^,]*}}, %xmm0
; FINITE-NEXT: ret
-define double @y_uge(double %x) nounwind {
+define double @uge_y(double %x) nounwind {
%c = fcmp uge double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
-; CHECK: y_ule:
+; CHECK: ule_y:
; CHECK-NEXT: movsd {{[^,]*}}, %xmm1
; CHECK-NEXT: minsd %xmm0, %xmm1
; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
; CHECK-NEXT: ret
-; UNSAFE: y_ule:
+; UNSAFE: ule_y:
; UNSAFE-NEXT: minsd {{[^,]*}}, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: y_ule:
+; FINITE: ule_y:
; FINITE-NEXT: minsd {{[^,]*}}, %xmm0
; FINITE-NEXT: ret
-define double @y_ule(double %x) nounwind {
+define double @ule_y(double %x) nounwind {
%c = fcmp ule double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
-; CHECK: y_uge_inverse:
+; CHECK: uge_inverse_y:
; CHECK-NEXT: minsd {{[^,]*}}, %xmm0
; CHECK-NEXT: ret
-; UNSAFE: y_uge_inverse:
+; UNSAFE: uge_inverse_y:
; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
; UNSAFE-NEXT: minsd %xmm0, %xmm1
; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: y_uge_inverse:
+; FINITE: uge_inverse_y:
; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @y_uge_inverse(double %x) nounwind {
+define double @uge_inverse_y(double %x) nounwind {
%c = fcmp uge double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
-; CHECK: y_ule_inverse:
+; CHECK: ule_inverse_y:
; CHECK-NEXT: maxsd {{[^,]*}}, %xmm0
; CHECK-NEXT: ret
-; UNSAFE: y_ule_inverse:
+; UNSAFE: ule_inverse_y:
; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
; UNSAFE-NEXT: maxsd %xmm0, %xmm1
; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; UNSAFE-NEXT: ret
-; FINITE: y_ule_inverse:
+; FINITE: ule_inverse_y:
; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
; FINITE-NEXT: ret
-define double @y_ule_inverse(double %x) nounwind {
+define double @ule_inverse_y(double %x) nounwind {
%c = fcmp ule double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
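Reading one renamed pair above as a worked example (the NaN reasoning is an interpretation, not text from the commit): ogt_x is max(%x, +0.0) and may be lowered to maxsd even in the strict run, while ugt_x must not be, because fcmp ugt is true on NaN, so the select has to return %x when %x is NaN, whereas maxsd with a NaN input returns its second source operand (the zero here). That is why the plain CHECK lines for the u* variants expect a ucomisd compare instead of minsd/maxsd, and only the UNSAFE/FINITE runs fold them.

; ogt_x:  %c = fcmp ogt double %x, 0.0 ; %d = select i1 %c, double %x, double 0.0  -> xorpd + maxsd
; ugt_x:  %c = fcmp ugt double %x, 0.0 ; %d = select i1 %c, double %x, double 0.0  -> ucomisd + branch (strict run)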
diff --git a/test/CodeGen/X86/win64_alloca_dynalloca.ll b/test/CodeGen/X86/win64_alloca_dynalloca.ll
index a961c6af18..cc11e4c28e 100644
--- a/test/CodeGen/X86/win64_alloca_dynalloca.ll
+++ b/test/CodeGen/X86/win64_alloca_dynalloca.ll
@@ -1,12 +1,9 @@
-; RUN: llc < %s -join-physregs -mcpu=generic -mtriple=x86_64-mingw32 | FileCheck %s -check-prefix=M64
-; RUN: llc < %s -join-physregs -mcpu=generic -mtriple=x86_64-win32 | FileCheck %s -check-prefix=W64
-; RUN: llc < %s -join-physregs -mcpu=generic -mtriple=x86_64-win32-macho | FileCheck %s -check-prefix=EFI
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-mingw32 | FileCheck %s -check-prefix=M64
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-win32 | FileCheck %s -check-prefix=W64
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-win32-macho | FileCheck %s -check-prefix=EFI
; PR8777
; PR8778
-; Passing the same value in two registers creates a false interference that
-; only -join-physregs resolves. It could also be handled by a parallel copy.
-
define i64 @foo(i64 %n, i64 %x) nounwind {
entry:
@@ -31,19 +28,19 @@ entry:
%buf1 = alloca i8, i64 %n, align 1
-; M64: leaq 15(%rcx), %rax
+; M64: leaq 15(%{{.*}}), %rax
; M64: andq $-16, %rax
; M64: callq ___chkstk
; M64-NOT: %rsp
; M64: movq %rsp, %rax
-; W64: leaq 15(%rcx), %rax
+; W64: leaq 15(%{{.*}}), %rax
; W64: andq $-16, %rax
; W64: callq __chkstk
; W64: subq %rax, %rsp
; W64: movq %rsp, %rax
-; EFI: leaq 15(%rcx), [[R1:%r.*]]
+; EFI: leaq 15(%{{.*}}), [[R1:%r.*]]
; EFI: andq $-16, [[R1]]
; EFI: movq %rsp, [[R64:%r.*]]
; EFI: subq [[R1]], [[R64]]
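For reference, the shape of the Win64 dynamic-alloca sequence these checks describe, the one relaxation being the register that holds the size (the %rsi below is a hypothetical choice, not from this output):

;   leaq  15(%rsi), %rax     ; round the byte count up ...      <- now 'leaq 15(%{{.*}}), %rax'
;   andq  $-16, %rax         ; ... to a 16-byte multiple
;   callq __chkstk           ; probe the pages; the size is passed in %rax
;   subq  %rax, %rsp         ; W64: the caller carves out the space itself
;   movq  %rsp, %rax         ; the buffer starts at the new stack pointer
; The mingw ___chkstk variant adjusts %rsp itself, hence the 'M64-NOT: %rsp' line above.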