summaryrefslogtreecommitdiff
path: root/test/CodeGen/X86/full-lsr.ll
diff options
context:
space:
mode:
authorDan Gohman <gohman@apple.com>2009-02-20 04:17:46 +0000
committerDan Gohman <gohman@apple.com>2009-02-20 04:17:46 +0000
commitc17e0cf6c03a36f424fafe88497b5fdf351cd50a (patch)
tree96b006e655ecd13cc33279849534ec54753e2932 /test/CodeGen/X86/full-lsr.ll
parent4ed0c5fb07a2a4db3dd1e6a266ba961429aba2ec (diff)
downloadllvm-c17e0cf6c03a36f424fafe88497b5fdf351cd50a.tar.gz
llvm-c17e0cf6c03a36f424fafe88497b5fdf351cd50a.tar.bz2
llvm-c17e0cf6c03a36f424fafe88497b5fdf351cd50a.tar.xz
Implement "superhero" strength reduction, or full strength
reduction of address calculations down to basic pointer arithmetic. This is currently off by default, as it needs a few other features before it becomes generally useful. And even when enabled, full strength reduction is only performed when it doesn't increase register pressure, and when several other conditions are true. This also factors a bunch of existing LSR code out of StrengthReduceStridedIVUsers into separate functions, and tidies up IV insertion. This actually decreases register pressure even in non-superhero mode. The change in iv-users-in-other-loops.ll is an example of this; there are two more adds because there are two fewer leas, and there is less spilling. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@65108 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/X86/full-lsr.ll')
-rw-r--r--test/CodeGen/X86/full-lsr.ll33
1 file changed, 33 insertions, 0 deletions
diff --git a/test/CodeGen/X86/full-lsr.ll b/test/CodeGen/X86/full-lsr.ll
new file mode 100644
index 0000000000..ee9eaf95c8
--- /dev/null
+++ b/test/CodeGen/X86/full-lsr.ll
@@ -0,0 +1,33 @@
+; RUN: llvm-as < %s | llc -march=x86 -enable-full-lsr >%t
+; RUN: grep {addl \\\$4,} %t | count 3
+; RUN: not grep {,%} %t
+
+define void @foo(float* nocapture %A, float* nocapture %B, float* nocapture %C, i32 %N) nounwind {
+entry:
+ %0 = icmp sgt i32 %N, 0 ; <i1> [#uses=1]
+ br i1 %0, label %bb, label %return
+
+bb: ; preds = %bb, %entry
+ %i.03 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=5]
+ %1 = getelementptr float* %A, i32 %i.03 ; <float*> [#uses=1]
+ %2 = load float* %1, align 4 ; <float> [#uses=1]
+ %3 = getelementptr float* %B, i32 %i.03 ; <float*> [#uses=1]
+ %4 = load float* %3, align 4 ; <float> [#uses=1]
+ %5 = add float %2, %4 ; <float> [#uses=1]
+ %6 = getelementptr float* %C, i32 %i.03 ; <float*> [#uses=1]
+ store float %5, float* %6, align 4
+ %7 = add i32 %i.03, 10 ; <i32> [#uses=3]
+ %8 = getelementptr float* %A, i32 %7 ; <float*> [#uses=1]
+ %9 = load float* %8, align 4 ; <float> [#uses=1]
+ %10 = getelementptr float* %B, i32 %7 ; <float*> [#uses=1]
+ %11 = load float* %10, align 4 ; <float> [#uses=1]
+ %12 = add float %9, %11 ; <float> [#uses=1]
+ %13 = getelementptr float* %C, i32 %7 ; <float*> [#uses=1]
+ store float %12, float* %13, align 4
+ %indvar.next = add i32 %i.03, 1 ; <i32> [#uses=2]
+ %exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
+ br i1 %exitcond, label %return, label %bb
+
+return: ; preds = %bb, %entry
+ ret void
+}