author    Arnold Schwaighofer <aschwaighofer@apple.com>  2013-10-29 01:33:57 +0000
committer Arnold Schwaighofer <aschwaighofer@apple.com>  2013-10-29 01:33:57 +0000
commit    c04d241d13820b33224b5cbd89a427fc08e5d1d9 (patch)
tree      dbf40b8b49ab3278f68615ad0af8db89758d9f07
parent    7e8cebf22d170769b0bf0c2a69309faa0e36ac4c (diff)
ARM cost model: Unaligned vectorized double stores are expensive

Updated a test case that assumed that <2 x double> would vectorize; it now uses <4 x float> instead.

radar://15338229

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@193574 91177308-0d34-0410-b5e6-96231b3b80d8
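Before the diff itself, a minimal sketch of what the new hook reports, assuming a TargetTransformInfo reference (TTI) obtained for a Swift-like ARM subtarget; the function name and setup below are illustrative only, not part of this patch:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

// Illustrative only: ratio of the unaligned to the aligned cost of a
// <2 x double> store.  With this patch it is 4 on a Swift-like subtarget
// (4 uops for vst1 vs. 1 uop for vstr).
static unsigned unalignedPenalty(const TargetTransformInfo &TTI,
                                 LLVMContext &Ctx) {
  Type *VecTy = VectorType::get(Type::getDoubleTy(Ctx), 2);
  unsigned Unaligned =
      TTI.getMemoryOpCost(Instruction::Store, VecTy, /*Alignment=*/8,
                          /*AddressSpace=*/0);
  unsigned Aligned =
      TTI.getMemoryOpCost(Instruction::Store, VecTy, /*Alignment=*/16,
                          /*AddressSpace=*/0);
  return Unaligned / Aligned;
}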
-rw-r--r--  lib/Target/ARM/ARMTargetTransformInfo.cpp          15
-rw-r--r--  test/Transforms/LoopVectorize/ARM/width-detect.ll   18
-rw-r--r--  test/Transforms/SLPVectorizer/ARM/memory.ll         20
3 files changed, 44 insertions(+), 9 deletions(-)
diff --git a/lib/Target/ARM/ARMTargetTransformInfo.cpp b/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 86b6215f09..6bbb38facc 100644
--- a/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -129,6 +129,9 @@ public:
unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
OperandValueKind Op1Info = OK_AnyValue,
OperandValueKind Op2Info = OK_AnyValue) const;
+
+ unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace) const;
/// @}
};
@@ -540,3 +543,15 @@ unsigned ARMTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueK
return Cost;
}
+unsigned ARMTTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace) const {
+ std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
+
+ if (Src->isVectorTy() && Alignment != 16 &&
+ Src->getVectorElementType()->isDoubleTy()) {
+ // Unaligned loads/stores are extremely inefficient.
+ // We need 4 uops for vst1/vld1 vs 1 uop for vldr/vstr.
+ return LT.first * 4;
+ }
+ return LT.first;
+}
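
To see why this 4x figure makes the vectorizers back off, here is a hedged sketch of the comparison a client effectively performs for a pair of adjacent double stores; this is a simplification (the real SLP cost model also charges for gathering the scalars into a vector), and the function name is illustrative:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

// Sketch, not the vectorizer's actual code: two scalar double stores vs.
// one <2 x double> store, both at 8-byte alignment.
static bool vectorStoreIsCheaper(const TargetTransformInfo &TTI,
                                 LLVMContext &Ctx) {
  Type *DblTy = Type::getDoubleTy(Ctx);
  Type *VecTy = VectorType::get(DblTy, 2);
  unsigned ScalarCost =
      2 * TTI.getMemoryOpCost(Instruction::Store, DblTy, /*Alignment=*/8,
                              /*AddressSpace=*/0);
  unsigned VectorCost =
      TTI.getMemoryOpCost(Instruction::Store, VecTy, /*Alignment=*/8,
                          /*AddressSpace=*/0);
  return VectorCost < ScalarCost; // false after this change: 4 vs. 2
}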
diff --git a/test/Transforms/LoopVectorize/ARM/width-detect.ll b/test/Transforms/LoopVectorize/ARM/width-detect.ll
index c0795b6a79..99d7fa75ee 100644
--- a/test/Transforms/LoopVectorize/ARM/width-detect.ll
+++ b/test/Transforms/LoopVectorize/ARM/width-detect.ll
@@ -3,27 +3,27 @@
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
target triple = "thumbv7-apple-ios3.0.0"
-;CHECK:foo_F64
-;CHECK: <2 x double>
+;CHECK:foo_F32
+;CHECK: <4 x float>
;CHECK:ret
-define double @foo_F64(double* nocapture %A, i32 %n) nounwind uwtable readonly ssp {
+define float @foo_F32(float* nocapture %A, i32 %n) nounwind uwtable readonly ssp {
%1 = icmp sgt i32 %n, 0
br i1 %1, label %.lr.ph, label %._crit_edge
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
- %prod.01 = phi double [ %4, %.lr.ph ], [ 0.000000e+00, %0 ]
- %2 = getelementptr inbounds double* %A, i64 %indvars.iv
- %3 = load double* %2, align 8
- %4 = fmul fast double %prod.01, %3
+ %prod.01 = phi float [ %4, %.lr.ph ], [ 0.000000e+00, %0 ]
+ %2 = getelementptr inbounds float* %A, i64 %indvars.iv
+ %3 = load float* %2, align 8
+ %4 = fmul fast float %prod.01, %3
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
br i1 %exitcond, label %._crit_edge, label %.lr.ph
._crit_edge: ; preds = %.lr.ph, %0
- %prod.0.lcssa = phi double [ 0.000000e+00, %0 ], [ %4, %.lr.ph ]
- ret double %prod.0.lcssa
+ %prod.0.lcssa = phi float [ 0.000000e+00, %0 ], [ %4, %.lr.ph ]
+ ret float %prod.0.lcssa
}
;CHECK:foo_I8
diff --git a/test/Transforms/SLPVectorizer/ARM/memory.ll b/test/Transforms/SLPVectorizer/ARM/memory.ll
new file mode 100644
index 0000000000..383c808d21
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/ARM/memory.ll
@@ -0,0 +1,20 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=thumbv7-apple-ios3.0.0 -mcpu=swift | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+
+; On Swift, unaligned <2 x double> stores need 4 uops, so it is cheaper
+; to do them as scalar stores.
+
+; CHECK-LABEL: expensive_double_store
+; CHECK-NOT: load <2 x double>
+; CHECK-NOT: store <2 x double>
+define void @expensive_double_store(double* noalias %dst, double* noalias %src, i64 %count) {
+entry:
+ %0 = load double* %src, align 8
+ store double %0, double* %dst, align 8
+ %arrayidx2 = getelementptr inbounds double* %src, i64 1
+ %1 = load double* %arrayidx2, align 8
+ %arrayidx3 = getelementptr inbounds double* %dst, i64 1
+ store double %1, double* %arrayidx3, align 8
+ ret void
+}
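
For completeness, a small assumed helper (the name storeCost is not from the tree) showing how a pass would query the new hook for a concrete store such as the ones in the test above; each scalar 'store double ..., align 8' costs 1, while the unaligned <2 x double> store the SLP vectorizer would have formed costs 4:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative helper: the cost of one store as ARMTTI::getMemoryOpCost
// sees it, with type, alignment, and address space taken from the IR.
static unsigned storeCost(const TargetTransformInfo &TTI, StoreInst *SI) {
  return TTI.getMemoryOpCost(Instruction::Store,
                             SI->getValueOperand()->getType(),
                             SI->getAlignment(),
                             SI->getPointerAddressSpace());
}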