author     Nadav Rotem <nrotem@apple.com>  2013-06-19 17:33:16 +0000
committer  Nadav Rotem <nrotem@apple.com>  2013-06-19 17:33:16 +0000
commit     80a6d6526c4ea4de90327c1404e2689a260a9fee (patch)
tree       1ce8ce13b766cda1ba0f291bd7d2b8483de6bee0
parent     dd3ef1a13b57b9086de5473d2576b16acba98e82 (diff)
download   llvm-80a6d6526c4ea4de90327c1404e2689a260a9fee.tar.gz
           llvm-80a6d6526c4ea4de90327c1404e2689a260a9fee.tar.bz2
           llvm-80a6d6526c4ea4de90327c1404e2689a260a9fee.tar.xz
SLPVectorizer: handle scalars that are extracted from vectors (using ExtractElementInst).
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@184325 91177308-0d34-0410-b5e6-96231b3b80d8
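
The patch teaches the SLP vectorizer to recognize scalar bundles whose members are extractelement instructions pulling lanes 0..VF-1, in order, from a single vector of the matching type; in that case the source vector is reused directly instead of being rebuilt through scalarization. A rough before/after sketch of the in-order case (illustrative only; %src and the consecutive slots %P0/%P1 are stand-ins, the checked-in @fextr test below is what the patch actually verifies, and the exact output depends on the cost model and the -dce cleanup):

  ; before: scalar extracts feed scalar fadds and two consecutive stores
  %LD = load <2 x double>* %src
  %V0 = extractelement <2 x double> %LD, i32 0
  %V1 = extractelement <2 x double> %LD, i32 1
  %A0 = fadd double %V0, 0.0
  %A1 = fadd double %V1, 1.1
  store double %A0, double* %P0, align 4
  store double %A1, double* %P1, align 4

  ; after (roughly): the extracts fold away and %LD is reused as the
  ; vector operand of the vectorized fadd
  %LD = load <2 x double>* %src
  %A  = fadd <2 x double> %LD, <double 0.0, double 1.1>
  %PV = bitcast double* %P0 to <2 x double>*
  store <2 x double> %A, <2 x double>* %PV, align 4

When the extracts are out of order relative to the stores, or the source vector type does not match the bundle width, CanReuseExtract rejects the bundle and the existing scalarization path is used (the fextr1 and fextr2 tests below cover those cases).
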
-rw-r--r--  lib/Transforms/Vectorize/VecUtils.cpp          44
-rw-r--r--  test/Transforms/SLPVectorizer/X86/extract.ll   59
2 files changed, 103 insertions, 0 deletions
diff --git a/lib/Transforms/Vectorize/VecUtils.cpp b/lib/Transforms/Vectorize/VecUtils.cpp
index 1e97ed400b..e79f08a56d 100644
--- a/lib/Transforms/Vectorize/VecUtils.cpp
+++ b/lib/Transforms/Vectorize/VecUtils.cpp
@@ -331,6 +331,34 @@ int BoUpSLP::getTreeCost(ArrayRef<Value *> VL) {
return getTreeCost_rec(VL, 0);
}
+static bool CanReuseExtract(ArrayRef<Value *> VL, unsigned VF,
+ VectorType *VecTy) {
+ // Check if all of the extracts come from the same vector and from the
+ // correct offset.
+ Value *VL0 = VL[0];
+ ExtractElementInst *E0 = cast<ExtractElementInst>(VL0);
+ Value *Vec = E0->getOperand(0);
+
+ // We have to extract from the same vector type.
+ if (Vec->getType() != VecTy)
+ return false;
+
+ // Check that all of the indices extract from the correct offset.
+ ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1));
+ if (!CI || CI->getZExtValue())
+ return false;
+
+ for (unsigned i = 1, e = VF; i < e; ++i) {
+ ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
+ ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));
+
+ if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec)
+ return false;
+ }
+
+ return true;
+}
+
void BoUpSLP::getTreeUses_rec(ArrayRef<Value *> VL, unsigned Depth) {
if (Depth == RecursionMaxDepth) return;
@@ -386,6 +414,12 @@ void BoUpSLP::getTreeUses_rec(ArrayRef<Value *> VL, unsigned Depth) {
}
switch (Opcode) {
+ case Instruction::ExtractElement: {
+ VectorType *VecTy = VectorType::get(VL[0]->getType(), VL.size());
+ // No need to follow ExtractElements that are going to be optimized away.
+ if (CanReuseExtract(VL, VL.size(), VecTy)) return;
+ // Fall through.
+ }
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPToUI:
@@ -523,6 +557,11 @@ int BoUpSLP::getTreeCost_rec(ArrayRef<Value *> VL, unsigned Depth) {
TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
switch (Opcode) {
+ case Instruction::ExtractElement: {
+ if (CanReuseExtract(VL, VL.size(), VecTy))
+ return 0;
+ return getScalarizationCost(VecTy);
+ }
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPToUI:
@@ -786,6 +825,11 @@ Value *BoUpSLP::vectorizeTree_rec(ArrayRef<Value *> VL, int VF) {
}
switch (Opcode) {
+ case Instruction::ExtractElement: {
+ if (CanReuseExtract(VL, VL.size(), VecTy))
+ return VL0->getOperand(0);
+ return Scalarize(VL, VecTy);
+ }
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPToUI:
diff --git a/test/Transforms/SLPVectorizer/X86/extract.ll b/test/Transforms/SLPVectorizer/X86/extract.ll
new file mode 100644
index 0000000000..f611fd4ec2
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/extract.ll
@@ -0,0 +1,59 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+;CHECK: fextr
+;CHECK-NOT: insertelement
+;CHECK-NOT: extractelement
+;CHECK: fadd <2 x double>
+;CHECK: ret void
+define void @fextr(double* %ptr) {
+entry:
+ %LD = load <2 x double>* undef
+ %V0 = extractelement <2 x double> %LD, i32 0
+ %V1 = extractelement <2 x double> %LD, i32 1
+ %P0 = getelementptr inbounds double* %ptr, i64 0
+ %P1 = getelementptr inbounds double* %ptr, i64 1
+ %A0 = fadd double %V0, 0.0
+ %A1 = fadd double %V1, 1.1
+ store double %A0, double* %P0, align 4
+ store double %A1, double* %P1, align 4
+ ret void
+}
+
+;CHECK: fextr1
+;CHECK: insertelement
+;CHECK: insertelement
+;CHECK: ret void
+define void @fextr1(double* %ptr) {
+entry:
+ %LD = load <2 x double>* undef
+ %V0 = extractelement <2 x double> %LD, i32 0
+ %V1 = extractelement <2 x double> %LD, i32 1
+ %P0 = getelementptr inbounds double* %ptr, i64 1 ; <--- incorrect order
+ %P1 = getelementptr inbounds double* %ptr, i64 0
+ %A0 = fadd double %V0, 1.2
+ %A1 = fadd double %V1, 3.4
+ store double %A0, double* %P0, align 4
+ store double %A1, double* %P1, align 4
+ ret void
+}
+
+;CHECK: fextr2
+;CHECK: insertelement
+;CHECK: insertelement
+;CHECK: ret void
+define void @fextr2(double* %ptr) {
+entry:
+ %LD = load <4 x double>* undef
+ %V0 = extractelement <4 x double> %LD, i32 0 ; <--- invalid size.
+ %V1 = extractelement <4 x double> %LD, i32 1
+ %P0 = getelementptr inbounds double* %ptr, i64 0
+ %P1 = getelementptr inbounds double* %ptr, i64 1
+ %A0 = fadd double %V0, 5.5
+ %A1 = fadd double %V1, 6.6
+ store double %A0, double* %P0, align 4
+ store double %A1, double* %P1, align 4
+ ret void
+}
+