summaryrefslogtreecommitdiff
path: root/test/CodeGen
diff options
context:
space:
mode:
authorAndrew Trick <atrick@apple.com>2011-01-21 05:51:33 +0000
committerAndrew Trick <atrick@apple.com>2011-01-21 05:51:33 +0000
commitc8bfd1d78ff9a307d1d4cb57cce4549b538e60f4 (patch)
tree13af786aec75effa04412a01b22f838fdeee111f /test/CodeGen
parentcd151d2f95eabae61b3cf8e675717d5674afbe85 (diff)
downloadllvm-c8bfd1d78ff9a307d1d4cb57cce4549b538e60f4.tar.gz
llvm-c8bfd1d78ff9a307d1d4cb57cce4549b538e60f4.tar.bz2
llvm-c8bfd1d78ff9a307d1d4cb57cce4549b538e60f4.tar.xz
Convert -enable-sched-cycles and -enable-sched-hazard to -disable
flags. They are still not enabled in this revision. Added TargetInstrInfo::isZeroCost() to fix a fundamental problem with the scheduler's model of operand latency in the selection DAG. Generalized unit tests to work with sched-cycles. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@123969 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen')
-rw-r--r--test/CodeGen/ARM/fnegs.ll20
-rw-r--r--test/CodeGen/ARM/fnmscs.ll8
-rw-r--r--test/CodeGen/ARM/fpconsts.ll4
-rw-r--r--test/CodeGen/ARM/unaligned_load_store.ll2
4 files changed, 17 insertions, 17 deletions
diff --git a/test/CodeGen/ARM/fnegs.ll b/test/CodeGen/ARM/fnegs.ll
index c15005e6e8..418b59803d 100644
--- a/test/CodeGen/ARM/fnegs.ll
+++ b/test/CodeGen/ARM/fnegs.ll
@@ -13,19 +13,19 @@ entry:
ret float %retval
}
; VFP2: test1:
-; VFP2: vneg.f32 s1, s0
+; VFP2: vneg.f32 s{{.*}}, s{{.*}}
; NFP1: test1:
-; NFP1: vneg.f32 d1, d0
+; NFP1: vneg.f32 d{{.*}}, d{{.*}}
; NFP0: test1:
-; NFP0: vneg.f32 s1, s0
+; NFP0: vneg.f32 s{{.*}}, s{{.*}}
; CORTEXA8: test1:
-; CORTEXA8: vneg.f32 d1, d0
+; CORTEXA8: vneg.f32 d{{.*}}, d{{.*}}
; CORTEXA9: test1:
-; CORTEXA9: vneg.f32 s1, s0
+; CORTEXA9: vneg.f32 s{{.*}}, s{{.*}}
define float @test2(float* %a) {
entry:
@@ -37,17 +37,17 @@ entry:
ret float %retval
}
; VFP2: test2:
-; VFP2: vneg.f32 s1, s0
+; VFP2: vneg.f32 s{{.*}}, s{{.*}}
; NFP1: test2:
-; NFP1: vneg.f32 d1, d0
+; NFP1: vneg.f32 d{{.*}}, d{{.*}}
; NFP0: test2:
-; NFP0: vneg.f32 s1, s0
+; NFP0: vneg.f32 s{{.*}}, s{{.*}}
; CORTEXA8: test2:
-; CORTEXA8: vneg.f32 d1, d0
+; CORTEXA8: vneg.f32 d{{.*}}, d{{.*}}
; CORTEXA9: test2:
-; CORTEXA9: vneg.f32 s1, s0
+; CORTEXA9: vneg.f32 s{{.*}}, s{{.*}}
diff --git a/test/CodeGen/ARM/fnmscs.ll b/test/CodeGen/ARM/fnmscs.ll
index 5d832537c0..76c806761f 100644
--- a/test/CodeGen/ARM/fnmscs.ll
+++ b/test/CodeGen/ARM/fnmscs.ll
@@ -11,7 +11,7 @@ entry:
; NEON: vnmla.f32
; A8: t1:
-; A8: vnmul.f32 s0, s1, s0
+; A8: vnmul.f32 s0, s{{[01]}}, s{{[01]}}
; A8: vsub.f32 d0, d0, d1
%0 = fmul float %a, %b
%1 = fsub float -0.0, %0
@@ -28,7 +28,7 @@ entry:
; NEON: vnmla.f32
; A8: t2:
-; A8: vnmul.f32 s0, s1, s0
+; A8: vnmul.f32 s0, s{{[01]}}, s{{[01]}}
; A8: vsub.f32 d0, d0, d1
%0 = fmul float %a, %b
%1 = fmul float -1.0, %0
@@ -45,7 +45,7 @@ entry:
; NEON: vnmla.f64
; A8: t3:
-; A8: vnmul.f64 d16, d16, d17
+; A8: vnmul.f64 d16, d1{{[67]}}, d1{{[67]}}
; A8: vsub.f64 d16, d16, d17
%0 = fmul double %a, %b
%1 = fsub double -0.0, %0
@@ -62,7 +62,7 @@ entry:
; NEON: vnmla.f64
; A8: t4:
-; A8: vnmul.f64 d16, d16, d17
+; A8: vnmul.f64 d16, d1{{[67]}}, d1{{[67]}}
; A8: vsub.f64 d16, d16, d17
%0 = fmul double %a, %b
%1 = fmul double -1.0, %0
diff --git a/test/CodeGen/ARM/fpconsts.ll b/test/CodeGen/ARM/fpconsts.ll
index 9e7a8ae28b..638dde9d8a 100644
--- a/test/CodeGen/ARM/fpconsts.ll
+++ b/test/CodeGen/ARM/fpconsts.ll
@@ -3,7 +3,7 @@
define float @t1(float %x) nounwind readnone optsize {
entry:
; CHECK: t1:
-; CHECK: vmov.f32 s1, #4.000000e+00
+; CHECK: vmov.f32 s{{.*}}, #4.000000e+00
%0 = fadd float %x, 4.000000e+00
ret float %0
}
@@ -27,7 +27,7 @@ entry:
define float @t4(float %x) nounwind readnone optsize {
entry:
; CHECK: t4:
-; CHECK: vmov.f32 s1, #-2.400000e+01
+; CHECK: vmov.f32 s{{.*}}, #-2.400000e+01
%0 = fmul float %x, -2.400000e+01
ret float %0
}
diff --git a/test/CodeGen/ARM/unaligned_load_store.ll b/test/CodeGen/ARM/unaligned_load_store.ll
index 354895e099..b42e11f2c4 100644
--- a/test/CodeGen/ARM/unaligned_load_store.ll
+++ b/test/CodeGen/ARM/unaligned_load_store.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | FileCheck %s -check-prefix=GENERIC
+; RUN: llc < %s -march=arm -pre-RA-sched=source | FileCheck %s -check-prefix=GENERIC
; RUN: llc < %s -mtriple=armv6-apple-darwin | FileCheck %s -check-prefix=DARWIN_V6
; RUN: llc < %s -mtriple=armv6-apple-darwin -arm-strict-align | FileCheck %s -check-prefix=GENERIC
; RUN: llc < %s -mtriple=armv6-linux | FileCheck %s -check-prefix=GENERIC