author    Dan Gohman <gohman@apple.com>    2010-02-16 01:42:53 +0000
committer Dan Gohman <gohman@apple.com>    2010-02-16 01:42:53 +0000
commit    c2385a0741c43bd93eb2033e2f11eaae83cdb1cb (patch)
tree      c09c437e569bc5979b10189ef10afef9191ffcc2 /test/CodeGen/X86/lsr-reuse-trunc.ll
parent    f463d9554b5bfa735e896ceb06f5fea42d33955b (diff)
Split the main for-each-use loop again, this time for GenerateTruncates,
as it also peeks at which registers are being used by other uses. This
makes LSR less sensitive to use-list order.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@96308 91177308-0d34-0410-b5e6-96231b3b80d8
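A minimal sketch of the restructuring the message describes, assuming the
hazard is a shared register pool that GenerateTruncates consults while other
uses are still populating it. The names below (LSRUse, collectFixupRegs,
generateTruncates, RegUses) are illustrative stand-ins loosely modeled on
LoopStrengthReduce.cpp, not LLVM's actual interfaces:

#include <cstdio>
#include <set>
#include <vector>

// Stand-in for an LSR use site; the real struct carries formulae, fixups, etc.
struct LSRUse { int Id; };

// Each use contributes registers to a shared pool as its formulae are built.
static void collectFixupRegs(const LSRUse &U, std::set<int> &RegUses) {
  RegUses.insert(U.Id);
}

// This step peeks at which registers *other* uses need. If it runs inside
// the same loop that fills RegUses, early uses see a partial set, so the
// outcome depends on use-list order.
static void generateTruncates(const LSRUse &U, const std::set<int> &RegUses) {
  std::printf("use %d decides with %zu registers visible\n",
              U.Id, RegUses.size());
}

int main() {
  std::vector<LSRUse> Uses = {{0}, {1}, {2}};
  std::set<int> RegUses;

  // The split: finish populating RegUses for every use first...
  for (const LSRUse &U : Uses)
    collectFixupRegs(U, RegUses);

  // ...then run the order-sensitive step in its own loop, so each use sees
  // the same complete set no matter how Uses happens to be ordered.
  for (const LSRUse &U : Uses)
    generateTruncates(U, RegUses);
  return 0;
}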
Diffstat (limited to 'test/CodeGen/X86/lsr-reuse-trunc.ll')
-rw-r--r--   test/CodeGen/X86/lsr-reuse-trunc.ll   59
1 file changed, 59 insertions, 0 deletions
diff --git a/test/CodeGen/X86/lsr-reuse-trunc.ll b/test/CodeGen/X86/lsr-reuse-trunc.ll
new file mode 100644
index 0000000000..d1d714491f
--- /dev/null
+++ b/test/CodeGen/X86/lsr-reuse-trunc.ll
@@ -0,0 +1,59 @@
+; RUN: llc < %s -march=x86-64 | FileCheck %s
+
+; Full strength reduction wouldn't reduce register pressure, so LSR should
+; stick with indexing here.
+
+; CHECK: movaps (%rsi,%rax,4), %xmm3
+; CHECK: movaps %xmm3, (%rdi,%rax,4)
+; CHECK: addq $4, %rax
+; CHECK: cmpl %eax, (%rdx)
+; CHECK-NEXT: jg
+
+define void @vvfloorf(float* nocapture %y, float* nocapture %x, i32* nocapture %n) nounwind {
+entry:
+ %0 = load i32* %n, align 4
+ %1 = icmp sgt i32 %0, 0
+ br i1 %1, label %bb, label %return
+
+bb:
+ %indvar = phi i64 [ %indvar.next, %bb ], [ 0, %entry ]
+ %tmp = shl i64 %indvar, 2
+ %scevgep = getelementptr float* %y, i64 %tmp
+ %scevgep9 = bitcast float* %scevgep to <4 x float>*
+ %scevgep10 = getelementptr float* %x, i64 %tmp
+ %scevgep1011 = bitcast float* %scevgep10 to <4 x float>*
+ %2 = load <4 x float>* %scevgep1011, align 16
+ %3 = bitcast <4 x float> %2 to <4 x i32>
+ %4 = and <4 x i32> %3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
+ %5 = bitcast <4 x i32> %4 to <4 x float>
+ %6 = and <4 x i32> %3, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
+ %7 = tail call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %5, <4 x float> <float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06>, i8 5) nounwind
+ %tmp.i4 = bitcast <4 x float> %7 to <4 x i32>
+ %8 = xor <4 x i32> %tmp.i4, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %9 = and <4 x i32> %8, <i32 1258291200, i32 1258291200, i32 1258291200, i32 1258291200>
+ %10 = or <4 x i32> %9, %6
+ %11 = bitcast <4 x i32> %10 to <4 x float>
+ %12 = fadd <4 x float> %2, %11
+ %13 = fsub <4 x float> %12, %11
+ %14 = tail call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %2, <4 x float> %13, i8 1) nounwind
+ %15 = bitcast <4 x float> %14 to <4 x i32>
+ %16 = tail call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %15) nounwind readnone
+ %17 = fadd <4 x float> %13, %16
+ %tmp.i = bitcast <4 x float> %17 to <4 x i32>
+ %18 = or <4 x i32> %tmp.i, %6
+ %19 = bitcast <4 x i32> %18 to <4 x float>
+ store <4 x float> %19, <4 x float>* %scevgep9, align 16
+ %tmp12 = add i64 %tmp, 4
+ %tmp13 = trunc i64 %tmp12 to i32
+ %20 = load i32* %n, align 4
+ %21 = icmp sgt i32 %20, %tmp13
+ %indvar.next = add i64 %indvar, 1
+ br i1 %21, label %bb, label %return
+
+return:
+ ret void
+}
+
+declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
+
+declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone
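
For orientation, the IR above is a hand-vectorized floor kernel: 1258291200
is 0x4B000000, the bit pattern of 8388608.0f = 2^23, so the sign-masked
add/subtract pair rounds each lane to a nearby integer, and the final
compare-and-adjust converts round-to-nearest into floor. A scalar sketch of
the same computation (vvfloorf_ref is a hypothetical name; the real loop
handles four lanes per SSE iteration, which is what produces the
(%rsi,%rax,4) indexed addressing the CHECK lines expect):

#include <cmath>

// Hypothetical scalar reference for the vectorized test above. Note that
// *n is reloaded on every trip, just as the IR reloads it, which is why
// the CHECK lines expect `cmpl %eax, (%rdx)` against memory rather than
// a bound hoisted into a register.
void vvfloorf_ref(float *y, const float *x, const int *n) {
  for (int i = 0; i < *n; i += 4)      // the IR advances 4 floats per trip
    for (int j = 0; j < 4; ++j)
      y[i + j] = std::floor(x[i + j]);
}

Because the index is also needed, truncated to i32, for the bound check,
full strength reduction would have to carry a counter alongside the rewritten
pointers; with indexed addressing a single register serves the load, the
store, and the trunc, which is the register-pressure tradeoff the test's
comment points at.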