-rw-r--r--   lib/Target/ARM/ARMTargetTransformInfo.cpp   18
-rw-r--r--   lib/Transforms/Vectorize/SLPVectorizer.cpp   21
-rw-r--r--   test/Transforms/SLPVectorizer/ARM/sroa.ll    52
3 files changed, 85 insertions, 6 deletions
diff --git a/lib/Target/ARM/ARMTargetTransformInfo.cpp b/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 9dc1cd1fee..86b6215f09 100644
--- a/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -523,8 +523,20 @@ unsigned ARMTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueK
if (Idx != -1)
return LT.first * CostTbl[Idx].Cost;
-
- return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info,
- Op2Info);
+ unsigned Cost =
+ TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
+
+ // This is somewhat of a hack. The problem we are facing is that SROA
+ // constructs values out of sequences of shift/and/or instructions. ISel
+ // recognizes these sequences and lowers them at zero cost, but the
+ // vectorized code does not get the same treatment. Because we have support
+ // for v2i64 but not for i64, those sequences look particularly beneficial
+ // to vectorize.
+ // To work around this, we increase the cost of v2i64 operations to make
+ // them seem less beneficial.
+ if (LT.second == MVT::v2i64 &&
+ Op2Info == TargetTransformInfo::OK_UniformConstantValue)
+ Cost += 4;
+
+ return Cost;
}
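
For illustration only (a minimal sketch, not part of the patch; the function and value names below are invented), this is the shape of the scalar construction that ISel folds away, next to the v2i64 form that the extra cost above is meant to penalize:

define i64 @make_i64(i32 %lo, i32 %hi) {
entry:
  ; Scalar SROA-style construction: ISel lowers this to plain register moves
  ; on ARM, so the shl/or are effectively free.
  %lo.ext = zext i32 %lo to i64
  %hi.ext = zext i32 %hi to i64
  %hi.shl = shl nuw i64 %hi.ext, 32        ; shift amount is a uniform constant
  %val = or i64 %hi.shl, %lo.ext
  ret i64 %val
}

define <2 x i64> @make_v2i64(<2 x i64> %lo.ext, <2 x i64> %hi.ext) {
entry:
  ; Vectorized equivalent: these become real v2i64 instructions on ARM. The
  ; shl, whose second operand is a uniform constant, is the case the +4 above
  ; targets.
  %hi.shl = shl nuw <2 x i64> %hi.ext, <i64 32, i64 32>
  %val = or <2 x i64> %hi.shl, %lo.ext
  ret <2 x i64> %val
}
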
diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 012521ad01..2797a21421 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -1013,9 +1013,24 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
} else {
- ScalarCost = VecTy->getNumElements() *
- TTI->getArithmeticInstrCost(Opcode, ScalarTy);
- VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy);
+ // Certain instructions can be cheaper to vectorize if they have a
+ // constant second vector operand.
+ TargetTransformInfo::OperandValueKind Op1VK =
+ TargetTransformInfo::OK_AnyValue;
+ TargetTransformInfo::OperandValueKind Op2VK =
+ TargetTransformInfo::OK_UniformConstantValue;
+
+ // Check whether all second operands are constant.
+ for (unsigned i = 0; i < VL.size(); ++i)
+ if (!isa<ConstantInt>(cast<Instruction>(VL[i])->getOperand(1))) {
+ Op2VK = TargetTransformInfo::OK_AnyValue;
+ break;
+ }
+
+ ScalarCost =
+ VecTy->getNumElements() *
+ TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK);
+ VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK);
}
return VecCost - ScalarCost;
}
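
A rough sketch of which bundles the new check affects (illustrative IR, not part of the patch; names are invented): when every scalar in a bundle has a ConstantInt second operand, the bundle is costed with OK_UniformConstantValue, otherwise the loop above resets Op2VK to OK_AnyValue.

define void @shift_pair(i64* %p, i64 %a, i64 %b) {
entry:
  ; Both shifts use the ConstantInt 32 as their second operand, so an SLP
  ; bundle formed from them is costed with Op2VK = OK_UniformConstantValue.
  ; If either shift amount were a variable, the check falls back to
  ; OK_AnyValue.
  %sa = shl nuw i64 %a, 32
  %sb = shl nuw i64 %b, 32
  %p0 = getelementptr inbounds i64* %p, i32 0
  %p1 = getelementptr inbounds i64* %p, i32 1
  store i64 %sa, i64* %p0, align 8
  store i64 %sb, i64* %p1, align 8
  ret void
}
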
diff --git a/test/Transforms/SLPVectorizer/ARM/sroa.ll b/test/Transforms/SLPVectorizer/ARM/sroa.ll
new file mode 100644
index 0000000000..e0c75b147f
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/ARM/sroa.ll
@@ -0,0 +1,52 @@
+; RUN: opt -S -mcpu=swift -mtriple=thumbv7-apple-ios -basicaa -slp-vectorizer < %s | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+
+%class.Complex = type { double, double }
+
+; Code like this is the result of SROA. Make sure we don't vectorize it:
+; in the scalar version the shl/or sequences are folded away by the backend,
+; whereas in the vectorized code they remain as real instructions.
+
+; CHECK-LABEL: SROAed
+; CHECK-NOT: shl <2 x i64>
+; CHECK-NOT: or <2 x i64>
+
+define void @SROAed(%class.Complex* noalias nocapture sret %agg.result, [4 x i32] %a.coerce, [4 x i32] %b.coerce) {
+entry:
+ %a.coerce.fca.0.extract = extractvalue [4 x i32] %a.coerce, 0
+ %a.sroa.0.0.insert.ext = zext i32 %a.coerce.fca.0.extract to i64
+ %a.coerce.fca.1.extract = extractvalue [4 x i32] %a.coerce, 1
+ %a.sroa.0.4.insert.ext = zext i32 %a.coerce.fca.1.extract to i64
+ %a.sroa.0.4.insert.shift = shl nuw i64 %a.sroa.0.4.insert.ext, 32
+ %a.sroa.0.4.insert.insert = or i64 %a.sroa.0.4.insert.shift, %a.sroa.0.0.insert.ext
+ %0 = bitcast i64 %a.sroa.0.4.insert.insert to double
+ %a.coerce.fca.2.extract = extractvalue [4 x i32] %a.coerce, 2
+ %a.sroa.3.8.insert.ext = zext i32 %a.coerce.fca.2.extract to i64
+ %a.coerce.fca.3.extract = extractvalue [4 x i32] %a.coerce, 3
+ %a.sroa.3.12.insert.ext = zext i32 %a.coerce.fca.3.extract to i64
+ %a.sroa.3.12.insert.shift = shl nuw i64 %a.sroa.3.12.insert.ext, 32
+ %a.sroa.3.12.insert.insert = or i64 %a.sroa.3.12.insert.shift, %a.sroa.3.8.insert.ext
+ %1 = bitcast i64 %a.sroa.3.12.insert.insert to double
+ %b.coerce.fca.0.extract = extractvalue [4 x i32] %b.coerce, 0
+ %b.sroa.0.0.insert.ext = zext i32 %b.coerce.fca.0.extract to i64
+ %b.coerce.fca.1.extract = extractvalue [4 x i32] %b.coerce, 1
+ %b.sroa.0.4.insert.ext = zext i32 %b.coerce.fca.1.extract to i64
+ %b.sroa.0.4.insert.shift = shl nuw i64 %b.sroa.0.4.insert.ext, 32
+ %b.sroa.0.4.insert.insert = or i64 %b.sroa.0.4.insert.shift, %b.sroa.0.0.insert.ext
+ %2 = bitcast i64 %b.sroa.0.4.insert.insert to double
+ %b.coerce.fca.2.extract = extractvalue [4 x i32] %b.coerce, 2
+ %b.sroa.3.8.insert.ext = zext i32 %b.coerce.fca.2.extract to i64
+ %b.coerce.fca.3.extract = extractvalue [4 x i32] %b.coerce, 3
+ %b.sroa.3.12.insert.ext = zext i32 %b.coerce.fca.3.extract to i64
+ %b.sroa.3.12.insert.shift = shl nuw i64 %b.sroa.3.12.insert.ext, 32
+ %b.sroa.3.12.insert.insert = or i64 %b.sroa.3.12.insert.shift, %b.sroa.3.8.insert.ext
+ %3 = bitcast i64 %b.sroa.3.12.insert.insert to double
+ %add = fadd double %0, %2
+ %add3 = fadd double %1, %3
+ %re.i.i = getelementptr inbounds %class.Complex* %agg.result, i32 0, i32 0
+ store double %add, double* %re.i.i, align 4
+ %im.i.i = getelementptr inbounds %class.Complex* %agg.result, i32 0, i32 1
+ store double %add3, double* %im.i.i, align 4
+ ret void
+}