author    Michael Liao <michael.liao@intel.com>  2013-03-20 22:01:10 +0000
committer Michael Liao <michael.liao@intel.com>  2013-03-20 22:01:10 +0000
commit    f74e9bf650d7c40d595d3bb60e3c901e2bccec4b (patch)
tree      26fc6ab996a2aa6e0516091cb0f5bee4483168c7
parent    6178e5f50c0c8be26913cd93238a5035a39cdf37 (diff)
Correct cost model for vector shift on AVX2
- After moving the logic that recognizes vector shifts by a scalar (splat) amount from DAG combining into DAG lowering, all vector shifts are declared as custom-lowered, even though such shifts are legal on AVX2. As a result, the cost model needs special tuning to identify these legal cases.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@177586 91177308-0d34-0410-b5e6-96231b3b80d8
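To make the tuned case concrete, here is a minimal IR sketch of the shift-by-scalar-amount pattern the custom lowering looks for. It is not part of this patch, and the function name is purely illustrative; on AVX2 a shift of this shape can be selected as a single shift-by-count instruction, which is why the table added below assigns it a cost of 1.

; Illustration only (not from this commit): an <8 x i32> shift whose amount is
; one scalar splatted across all lanes. The custom lowering recognizes this
; shape, and with AVX2 it maps to a single shift instruction.
define <8 x i32> @shl_by_splat(<8 x i32> %v, i32 %amt) {
  %ins = insertelement <8 x i32> undef, i32 %amt, i32 0
  %splat = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer
  %r = shl <8 x i32> %v, %splat
  ret <8 x i32> %r
}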
-rw-r--r--  lib/Target/X86/X86TargetTransformInfo.cpp  |  23
-rw-r--r--  test/Analysis/CostModel/X86/arith.ll        |  54
2 files changed, 77 insertions(+), 0 deletions(-)
diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp
index 777ef508ec..3e3b86edbb 100644
--- a/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -169,6 +169,29 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty) const {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
+ static const CostTblEntry<MVT> AVX2CostTable[] = {
+ // Shifts on v4i64/v8i32 on AVX2 are legal, even though we declare them as
+ // custom-lowered so that we can detect the cases where the shift amount is
+ // a scalar (splat) value.
+ { ISD::SHL, MVT::v4i32, 1 },
+ { ISD::SRL, MVT::v4i32, 1 },
+ { ISD::SRA, MVT::v4i32, 1 },
+ { ISD::SHL, MVT::v8i32, 1 },
+ { ISD::SRL, MVT::v8i32, 1 },
+ { ISD::SRA, MVT::v8i32, 1 },
+ { ISD::SHL, MVT::v2i64, 1 },
+ { ISD::SRL, MVT::v2i64, 1 },
+ { ISD::SHL, MVT::v4i64, 1 },
+ { ISD::SRL, MVT::v4i64, 1 },
+ };
+
+ // Look for AVX2 lowering tricks.
+ if (ST->hasAVX2()) {
+ int Idx = CostTableLookup<MVT>(AVX2CostTable, array_lengthof(AVX2CostTable),
+ ISD, LT.second);
+ if (Idx != -1)
+ return LT.first * AVX2CostTable[Idx].Cost;
+ }
+
static const CostTblEntry<MVT> AVX1CostTable[] = {
// We don't have to scalarize unsupported ops. We can issue two half-sized
// operations and we only need to extract the upper YMM half.
diff --git a/test/Analysis/CostModel/X86/arith.ll b/test/Analysis/CostModel/X86/arith.ll
index f0521bae48..5f9444157a 100644
--- a/test/Analysis/CostModel/X86/arith.ll
+++ b/test/Analysis/CostModel/X86/arith.ll
@@ -72,3 +72,57 @@ define i32 @fmul(i32 %arg) {
%B = fmul <8 x float> undef, undef
ret i32 undef
}
+
+; AVX: shift
+; AVX2: shift
+define void @shift() {
+ ; AVX: cost of 2 {{.*}} shl
+ ; AVX2: cost of 1 {{.*}} shl
+ %A0 = shl <4 x i32> undef, undef
+ ; AVX: cost of 2 {{.*}} shl
+ ; AVX2: cost of 1 {{.*}} shl
+ %A1 = shl <2 x i64> undef, undef
+
+ ; AVX: cost of 2 {{.*}} lshr
+ ; AVX2: cost of 1 {{.*}} lshr
+ %B0 = lshr <4 x i32> undef, undef
+ ; AVX: cost of 2 {{.*}} lshr
+ ; AVX2: cost of 1 {{.*}} lshr
+ %B1 = lshr <2 x i64> undef, undef
+
+ ; AVX: cost of 2 {{.*}} ashr
+ ; AVX2: cost of 1 {{.*}} ashr
+ %C0 = ashr <4 x i32> undef, undef
+ ; AVX: cost of 6 {{.*}} ashr
+ ; AVX2: cost of 6 {{.*}} ashr
+ %C1 = ashr <2 x i64> undef, undef
+
+ ret void
+}
+
+; AVX: avx2shift
+; AVX2: avx2shift
+define void @avx2shift() {
+ ; AVX: cost of 2 {{.*}} shl
+ ; AVX2: cost of 1 {{.*}} shl
+ %A0 = shl <8 x i32> undef, undef
+ ; AVX: cost of 2 {{.*}} shl
+ ; AVX2: cost of 1 {{.*}} shl
+ %A1 = shl <4 x i64> undef, undef
+
+ ; AVX: cost of 2 {{.*}} lshr
+ ; AVX2: cost of 1 {{.*}} lshr
+ %B0 = lshr <8 x i32> undef, undef
+ ; AVX: cost of 2 {{.*}} lshr
+ ; AVX2: cost of 1 {{.*}} lshr
+ %B1 = lshr <4 x i64> undef, undef
+
+ ; AVX: cost of 2 {{.*}} ashr
+ ; AVX2: cost of 1 {{.*}} ashr
+ %C0 = ashr <8 x i32> undef, undef
+ ; AVX: cost of 12 {{.*}} ashr
+ ; AVX2: cost of 12 {{.*}} ashr
+ %C1 = ashr <4 x i64> undef, undef
+
+ ret void
+}
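For context, the AVX/AVX2 check prefixes above are driven by RUN lines in the existing header of arith.ll, which is outside this hunk. They are typically along the following lines; this is shown only as an illustration, since the actual lines are not part of this diff and the exact triple and CPU names are assumptions.

; Illustrative only -- the real RUN lines live in the file header, not in this
; patch. opt prints the cost-model analysis and FileCheck matches it under the
; two prefixes.
; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s --check-prefix=AVX
; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=core-avx2 | FileCheck %s --check-prefix=AVX2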