author     Yi Jiang <yjiang@apple.com>    2013-10-02 20:20:39 +0000
committer  Yi Jiang <yjiang@apple.com>    2013-10-02 20:20:39 +0000
commit     d0132a783341696eba8ac97b83ae3388d95b4563 (patch)
tree       b6c277c5e933f642983821d62c0e863768497d8f
parent     7b7294c534f97f97860090401672a9c9831033db (diff)
Apply SLP vectorization on fully-vectorizable trees of height 2
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@191852 91177308-0d34-0410-b5e6-96231b3b80d8
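
For context, the fully-vectorizable height-2 trees targeted here are bundles of consecutive stores fed directly by bundles of consecutive loads, with nothing left to gather. A minimal source-level sketch (hypothetical, not part of this commit) of code that produces such a tree, matching the tiny_tree_fully_vectorizable test below:

// Hypothetical C++ sketch, assuming clang emits straight-line scalar IR:
// two adjacent loads feeding two adjacent stores form a height-2 SLP tree
// (a load bundle plus a store bundle, no gathering), which this patch lets
// the SLP vectorizer turn into a <2 x double> load/store pair.
void copy_pair(double *__restrict dst, const double *__restrict src) {
  dst[0] = src[0];  // lane 0: consecutive load -> consecutive store
  dst[1] = src[1];  // lane 1
}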
 lib/Transforms/Vectorize/SLPVectorizer.cpp     |  25 ++++-
 test/Transforms/SLPVectorizer/X86/tiny-tree.ll | 147 +++++++++++++++++++++++--
 2 files changed, 151 insertions(+), 21 deletions(-)
diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 4bee2cbf89..7d7e8774d1 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -311,6 +311,10 @@ private:
/// \returns a vector from a collection of scalars in \p VL.
Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);
+ /// \returns whether the VectorizableTree is fully vectorizable and will
+ /// be beneficial even when the tree height is tiny.
+ bool isFullyVectorizableTinyTree();
+
struct TreeEntry {
TreeEntry() : Scalars(), VectorizedValue(0), LastScalarIndex(0),
NeedToGather(0) {}
@@ -917,15 +921,28 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
}
}
+bool BoUpSLP::isFullyVectorizableTinyTree() {
+ DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
+ VectorizableTree.size() << " is fully vectorizable.\n");
+
+ // We only handle trees of height 2.
+ if (VectorizableTree.size() != 2)
+ return false;
+
+ // Gathering cost would be too much for tiny trees.
+ if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
+ return false;
+
+ return true;
+}
+
int BoUpSLP::getTreeCost() {
int Cost = 0;
DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
VectorizableTree.size() << ".\n");
- // Don't vectorize tiny trees. Small load/store chains or consecutive stores
- // of constants will be vectoried in SelectionDAG in MergeConsecutiveStores.
- // The SelectionDAG vectorizer can only handle pairs (trees of height = 2).
- if (VectorizableTree.size() < 3) {
+ // We only vectorize tiny trees if they are fully vectorizable.
+ if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) {
if (!VectorizableTree.size()) {
assert(!ExternalUses.size() && "We should not have any external users");
}
diff --git a/test/Transforms/SLPVectorizer/X86/tiny-tree.ll b/test/Transforms/SLPVectorizer/X86/tiny-tree.ll
index d816232baf..2747a1f489 100644
--- a/test/Transforms/SLPVectorizer/X86/tiny-tree.ll
+++ b/test/Transforms/SLPVectorizer/X86/tiny-tree.ll
@@ -1,27 +1,140 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
-; RUN: opt < %s -basicaa -slp-vectorizer -slp-threshold=-5 -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
+; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 | FileCheck %s
-%t1 = type {%t2, double, double, i8 }
-%t2 = type { double, double, double }
-%t3 = type { double, double, i8 }
-; We check that SLP vectorizer will not try to vectorize tiny trees
-; even with a negative threshold.
-; CHECK: tiny_tree_test
-; CHECK-NOT: <2 x double>
+; CHECK: tiny_tree_fully_vectorizable
+; CHECK: load <2 x double>
+; CHECK: store <2 x double>
; CHECK: ret
-define void @tiny_tree_test(%t3* %this, %t1* %m) align 2 {
+define void @tiny_tree_fully_vectorizable(double* noalias nocapture %dst, double* noalias nocapture readonly %src, i64 %count) #0 {
entry:
- %m41.i = getelementptr inbounds %t1* %m, i64 0, i32 0, i32 1
- %0 = load double* %m41.i, align 8
- %_tx = getelementptr inbounds %t3* %this, i64 0, i32 0
- store double %0, double* %_tx, align 8
- %m42.i = getelementptr inbounds %t1* %m, i64 0, i32 0, i32 2
- %1 = load double* %m42.i, align 8
- %_ty = getelementptr inbounds %t3* %this, i64 0, i32 1
- store double %1, double* %_ty, align 8
+ %cmp12 = icmp eq i64 %count, 0
+ br i1 %cmp12, label %for.end, label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.015 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+ %dst.addr.014 = phi double* [ %add.ptr4, %for.body ], [ %dst, %entry ]
+ %src.addr.013 = phi double* [ %add.ptr, %for.body ], [ %src, %entry ]
+ %0 = load double* %src.addr.013, align 8
+ store double %0, double* %dst.addr.014, align 8
+ %arrayidx2 = getelementptr inbounds double* %src.addr.013, i64 1
+ %1 = load double* %arrayidx2, align 8
+ %arrayidx3 = getelementptr inbounds double* %dst.addr.014, i64 1
+ store double %1, double* %arrayidx3, align 8
+ %add.ptr = getelementptr inbounds double* %src.addr.013, i64 %i.015
+ %add.ptr4 = getelementptr inbounds double* %dst.addr.014, i64 %i.015
+ %inc = add i64 %i.015, 1
+ %exitcond = icmp eq i64 %inc, %count
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+; CHECK: tiny_tree_fully_vectorizable2
+; CHECK: load <4 x float>
+; CHECK: store <4 x float>
+; CHECK: ret
+
+define void @tiny_tree_fully_vectorizable2(float* noalias nocapture %dst, float* noalias nocapture readonly %src, i64 %count) #0 {
+entry:
+ %cmp20 = icmp eq i64 %count, 0
+ br i1 %cmp20, label %for.end, label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.023 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+ %dst.addr.022 = phi float* [ %add.ptr8, %for.body ], [ %dst, %entry ]
+ %src.addr.021 = phi float* [ %add.ptr, %for.body ], [ %src, %entry ]
+ %0 = load float* %src.addr.021, align 4
+ store float %0, float* %dst.addr.022, align 4
+ %arrayidx2 = getelementptr inbounds float* %src.addr.021, i64 1
+ %1 = load float* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds float* %dst.addr.022, i64 1
+ store float %1, float* %arrayidx3, align 4
+ %arrayidx4 = getelementptr inbounds float* %src.addr.021, i64 2
+ %2 = load float* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds float* %dst.addr.022, i64 2
+ store float %2, float* %arrayidx5, align 4
+ %arrayidx6 = getelementptr inbounds float* %src.addr.021, i64 3
+ %3 = load float* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds float* %dst.addr.022, i64 3
+ store float %3, float* %arrayidx7, align 4
+ %add.ptr = getelementptr inbounds float* %src.addr.021, i64 %i.023
+ %add.ptr8 = getelementptr inbounds float* %dst.addr.022, i64 %i.023
+ %inc = add i64 %i.023, 1
+ %exitcond = icmp eq i64 %inc, %count
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+; We do not vectorize tiny trees that are not fully vectorizable.
+; CHECK: tiny_tree_not_fully_vectorizable
+; CHECK-NOT: <2 x double>
+; CHECK: ret
+
+define void @tiny_tree_not_fully_vectorizable(double* noalias nocapture %dst, double* noalias nocapture readonly %src, i64 %count) #0 {
+entry:
+ %cmp12 = icmp eq i64 %count, 0
+ br i1 %cmp12, label %for.end, label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.015 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+ %dst.addr.014 = phi double* [ %add.ptr4, %for.body ], [ %dst, %entry ]
+ %src.addr.013 = phi double* [ %add.ptr, %for.body ], [ %src, %entry ]
+ %0 = load double* %src.addr.013, align 8
+ store double %0, double* %dst.addr.014, align 8
+ %arrayidx2 = getelementptr inbounds double* %src.addr.013, i64 2
+ %1 = load double* %arrayidx2, align 8
+ %arrayidx3 = getelementptr inbounds double* %dst.addr.014, i64 1
+ store double %1, double* %arrayidx3, align 8
+ %add.ptr = getelementptr inbounds double* %src.addr.013, i64 %i.015
+ %add.ptr4 = getelementptr inbounds double* %dst.addr.014, i64 %i.015
+ %inc = add i64 %i.015, 1
+ %exitcond = icmp eq i64 %inc, %count
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
ret void
}
+
+; CHECK: tiny_tree_not_fully_vectorizable2
+; CHECK-NOT: <2 x double>
+; CHECK: ret
+
+define void @tiny_tree_not_fully_vectorizable2(float* noalias nocapture %dst, float* noalias nocapture readonly %src, i64 %count) #0 {
+entry:
+ %cmp20 = icmp eq i64 %count, 0
+ br i1 %cmp20, label %for.end, label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.023 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+ %dst.addr.022 = phi float* [ %add.ptr8, %for.body ], [ %dst, %entry ]
+ %src.addr.021 = phi float* [ %add.ptr, %for.body ], [ %src, %entry ]
+ %0 = load float* %src.addr.021, align 4
+ store float %0, float* %dst.addr.022, align 4
+ %arrayidx2 = getelementptr inbounds float* %src.addr.021, i64 4
+ %1 = load float* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds float* %dst.addr.022, i64 1
+ store float %1, float* %arrayidx3, align 4
+ %arrayidx4 = getelementptr inbounds float* %src.addr.021, i64 2
+ %2 = load float* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds float* %dst.addr.022, i64 2
+ store float %2, float* %arrayidx5, align 4
+ %arrayidx6 = getelementptr inbounds float* %src.addr.021, i64 3
+ %3 = load float* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds float* %dst.addr.022, i64 3
+ store float %3, float* %arrayidx7, align 4
+ %add.ptr = getelementptr inbounds float* %src.addr.021, i64 %i.023
+ %add.ptr8 = getelementptr inbounds float* %dst.addr.022, i64 %i.023
+ %inc = add i64 %i.023, 1
+ %exitcond = icmp eq i64 %inc, %count
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}