author     Nadav Rotem <nrotem@apple.com>    2013-05-22 19:47:32 +0000
committer  Nadav Rotem <nrotem@apple.com>    2013-05-22 19:47:32 +0000
commit     3f75c6cfb575917c8c112b2de9593cb860f79e56 (patch)
tree       c90086ea1adb8122e79c7b220703139936e0e6bb /test/Transforms/SLPVectorizer
parent     23d1d5eb566dbd10a81d9ce2dc67ad1548110b08 (diff)
SLPVectorizer: Change the order in which new instructions are added to the function.
We are not working on a DAG and I ran into a number of problems when I enabled the vectorization of 'diamond-trees' (trees that share leaves).
* Improved the numbering API.
* Changed the placement of new instructions to the last root.
* Fixed a bug with external tree users with non-zero lane.
* Fixed a bug in the placement of in-tree users.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@182508 91177308-0d34-0410-b5e6-96231b3b80d8
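For context, a minimal sketch of the 'diamond-tree' shape in question (illustrative only; it is not one of the tests added by this commit, and the names are made up): two adjacent store roots whose scalar expression trees read the same loaded value, so the trees share a leaf instead of being disjoint. The IR uses the same pre-opaque-pointer syntax as the tests below.

define void @diamond_sketch(double* %a, double* %b) {
entry:
  %a1 = getelementptr inbounds double* %a, i64 1
  %a2 = getelementptr inbounds double* %a, i64 2
  %x = load double* %a, align 8
  %y = load double* %a1, align 8
  %z = load double* %a2, align 8
  ; both lanes of the store bundle use %x, so the two scalar trees share a leaf
  %lane0 = fadd double %x, %y
  %lane1 = fadd double %x, %z
  store double %lane0, double* %b, align 8
  %b1 = getelementptr inbounds double* %b, i64 1
  store double %lane1, double* %b1, align 8
  ret void
}

The two consecutive stores form the root bundle; because %x feeds both lanes, the use-def graph is a DAG rather than a proper tree, which is what exposed the numbering and placement problems described above.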
Diffstat (limited to 'test/Transforms/SLPVectorizer')
-rw-r--r--  test/Transforms/SLPVectorizer/X86/crash_povray.ll   34
-rw-r--r--  test/Transforms/SLPVectorizer/X86/in-tree-user.ll   50
-rw-r--r--  test/Transforms/SLPVectorizer/X86/multi_user.ll      2
-rw-r--r--  test/Transforms/SLPVectorizer/X86/ordering.ll        19
4 files changed, 104 insertions, 1 deletion
diff --git a/test/Transforms/SLPVectorizer/X86/crash_povray.ll b/test/Transforms/SLPVectorizer/X86/crash_povray.ll
new file mode 100644
index 0000000000..7ef8df49f0
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/crash_povray.ll
@@ -0,0 +1,34 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+%struct.hoge = type { double, double, double}
+
+define void @zot(%struct.hoge* %arg) {
+bb:
+ %tmp = load double* undef, align 8
+ %tmp1 = fsub double %tmp, undef
+ %tmp2 = load double* undef, align 8
+ %tmp3 = fsub double %tmp2, undef
+ %tmp4 = fmul double %tmp3, undef
+ %tmp5 = fmul double %tmp3, undef
+ %tmp6 = fsub double %tmp5, undef
+ %tmp7 = getelementptr inbounds %struct.hoge* %arg, i64 0, i32 1
+ store double %tmp6, double* %tmp7, align 8
+ %tmp8 = fmul double %tmp1, undef
+ %tmp9 = fsub double %tmp8, undef
+ %tmp10 = getelementptr inbounds %struct.hoge* %arg, i64 0, i32 2
+ store double %tmp9, double* %tmp10, align 8
+ br i1 undef, label %bb11, label %bb12
+
+bb11: ; preds = %bb
+ br label %bb14
+
+bb12: ; preds = %bb
+ %tmp13 = fmul double undef, %tmp2
+ br label %bb14
+
+bb14: ; preds = %bb12, %bb11
+ ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/in-tree-user.ll b/test/Transforms/SLPVectorizer/X86/in-tree-user.ll
new file mode 100644
index 0000000000..69dc8897d6
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/in-tree-user.ll
@@ -0,0 +1,50 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.7.0"
+
+@.str = private unnamed_addr constant [6 x i8] c"bingo\00", align 1
+
+; We can't vectorize when the roots are used inside the tree.
+;CHECK: @in_tree_user
+;CHECK-NOT: load <2 x double>
+;CHECK: ret
+define void @in_tree_user(double* nocapture %A, i32 %n) {
+entry:
+ %conv = sitofp i32 %n to double
+ br label %for.body
+
+for.body: ; preds = %for.inc, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
+ %0 = shl nsw i64 %indvars.iv, 1
+ %arrayidx = getelementptr inbounds double* %A, i64 %0
+ %1 = load double* %arrayidx, align 8
+ %mul1 = fmul double %conv, %1
+ %mul2 = fmul double %mul1, 7.000000e+00
+ %add = fadd double %mul2, 5.000000e+00
+ %BadValue = fadd double %add, %add ; <------------------ In tree user.
+ %2 = or i64 %0, 1
+ %arrayidx6 = getelementptr inbounds double* %A, i64 %2
+ %3 = load double* %arrayidx6, align 8
+ %mul8 = fmul double %conv, %3
+ %mul9 = fmul double %mul8, 4.000000e+00
+ %add10 = fadd double %mul9, 9.000000e+00
+ %cmp11 = fcmp ogt double %add, %add10
+ br i1 %cmp11, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body
+ %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([6 x i8]* @.str, i64 0, i64 0))
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 100
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.inc
+ ret void
+}
+
+declare i32 @printf(i8* nocapture, ...)
+
diff --git a/test/Transforms/SLPVectorizer/X86/multi_user.ll b/test/Transforms/SLPVectorizer/X86/multi_user.ll
index aaa6063fde..d4d4d28950 100644
--- a/test/Transforms/SLPVectorizer/X86/multi_user.ll
+++ b/test/Transforms/SLPVectorizer/X86/multi_user.ll
@@ -12,8 +12,8 @@ target triple = "x86_64-apple-macosx10.7.0"
;}
;CHECK: @foo
-;CHECK: insertelement <4 x i32>
;CHECK: load <4 x i32>
+;CHECK: insertelement <4 x i32>
;CHECK: add <4 x i32>
;CHECK: store <4 x i32>
;CHECK: ret
diff --git a/test/Transforms/SLPVectorizer/X86/ordering.ll b/test/Transforms/SLPVectorizer/X86/ordering.ll
new file mode 100644
index 0000000000..588e115f5f
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/ordering.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+define void @updateModelQPFrame(i32 %m_Bits) {
+entry:
+ %0 = load double* undef, align 8
+ %mul = fmul double undef, %0
+ %mul2 = fmul double undef, %mul
+ %mul4 = fmul double %0, %mul2
+ %mul5 = fmul double undef, 4.000000e+00
+ %mul7 = fmul double undef, %mul5
+ %conv = sitofp i32 %m_Bits to double
+ %mul8 = fmul double %conv, %mul7
+ %add = fadd double %mul4, %mul8
+ %cmp11 = fcmp olt double %add, 0.000000e+00
+ ret void
+}