author    Adam Nemet <anemet@apple.com>  2014-03-31 21:54:48 +0000
committer Adam Nemet <anemet@apple.com>  2014-03-31 21:54:48 +0000
commit    4ffbb65494d1991993c0fb7cc4f999e96a799af8 (patch)
tree      2bc53a5f169ced178987d00a07f31d309eb0cbd5
parent    c779918bcdcc2ce0ec309c0e66c301cecd4cdf7d (diff)
[X86] Adjust cost of FP_TO_UINT v4f64->v4i32 as well
Pretty obvious follow-on to r205159 to also handle conversion from double
besides float.

Fixes <rdar://problem/16373208>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@205253 91177308-0d34-0410-b5e6-96231b3b80d8
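For context: FP_TO_UINT from v4f64 to v4i32 has no packed instruction on AVX2 (a packed unsigned convert only arrives with AVX-512), so the vectorizer must price in full scalarization. The test added below corresponds roughly to this C++ loop; the source is a reconstruction for illustration, not part of the commit:

#include <cstdint>

// Hypothetical source behind the IR test below: convert a double array to
// unsigned 32-bit integers. Each element needs a scalar fptoui on AVX2.
void convert(const double *in, uint32_t *out, int n) {
  for (int i = 0; i < n; ++i)
    out[i] = static_cast<uint32_t>(in[i]); // fptoui double -> i32
}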
-rw-r--r-- lib/Target/X86/X86TargetTransformInfo.cpp                      |  1 +
-rw-r--r-- test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll | 40 ++++
2 files changed, 41 insertions(+), 0 deletions(-)
diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp
index e1e151328f..ed04cdc4e4 100644
--- a/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -528,6 +528,7 @@ unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
     // problem is that the inserts form a read-modify-write chain so latency
     // should be factored in too. Inflating the cost per element by 1.
     { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 },
+    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 },
   };
 
   if (ST->hasAVX2()) {
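The table this hunk extends maps an (opcode, destination type, source type) triple to a cost that getCastInstrCost returns to the vectorizer. A minimal, self-contained sketch of that lookup pattern follows; the enum values and function name are hypothetical stand-ins for LLVM's actual cost-table machinery:

// Simplified model of the conversion-cost table consulted above.
enum class Op { FP_TO_UINT /* ... */ };
enum class Ty { v4i32, v4f64, v8i32, v8f32 /* ... */ };

struct ConvEntry { Op Opcode; Ty Dst; Ty Src; unsigned Cost; };

static const ConvEntry ConversionTbl[] = {
  { Op::FP_TO_UINT, Ty::v8i32, Ty::v8f32, 8 * 4 },
  { Op::FP_TO_UINT, Ty::v4i32, Ty::v4f64, 4 * 4 }, // the entry added here
};

// Return the table cost if a matching entry exists, or -1 to signal that
// the caller should fall back to the generic cost computation.
int lookupConversionCost(Op Opcode, Ty Dst, Ty Src) {
  for (const ConvEntry &E : ConversionTbl)
    if (E.Opcode == Opcode && E.Dst == Dst && E.Src == Src)
      return static_cast<int>(E.Cost);
  return -1;
}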
diff --git a/test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll b/test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll
new file mode 100644
index 0000000000..ef3e3bec79
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll
@@ -0,0 +1,40 @@
+; RUN: opt < %s -mcpu=core-avx2 -loop-vectorize -S | llc -mcpu=core-avx2 | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx"
+
+@n = global i32 10000, align 4
+@double_array = common global [10000 x double] zeroinitializer, align 16
+@unsigned_array = common global [10000 x i32] zeroinitializer, align 16
+
+; If we need to scalarize the fptoui and then use inserts to build up the
+; vector again, then there is certainly no value in going 256-bit wide.
+; CHECK-NOT: vpinsrd
+
+define void @convert() {
+entry:
+ %0 = load i32* @n, align 4
+ %cmp4 = icmp eq i32 %0, 0
+ br i1 %cmp4, label %for.end, label %for.body.preheader
+
+for.body.preheader: ; preds = %entry
+ br label %for.body
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds [10000 x double]* @double_array, i64 0, i64 %indvars.iv
+ %1 = load double* %arrayidx, align 8
+ %conv = fptoui double %1 to i32
+ %arrayidx2 = getelementptr inbounds [10000 x i32]* @unsigned_array, i64 0, i64 %indvars.iv
+ store i32 %conv, i32* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %2 = trunc i64 %indvars.iv.next to i32
+ %cmp = icmp ult i32 %2, %0
+ br i1 %cmp, label %for.body, label %for.end.loopexit
+
+for.end.loopexit: ; preds = %for.body
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret void
+}
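What the 4*4 cost is pricing, and what CHECK-NOT: vpinsrd guards against: without a packed conversion, a 4-wide fptoui lowers to four scalar converts (vcvttsd2si) whose results are reassembled with an insert chain (vpinsrd), and those inserts form a read-modify-write dependency chain. A scalar C++ sketch of that per-lane pattern, for illustration only:

#include <cstdint>

struct V4d { double e[4]; };
struct V4u { uint32_t e[4]; };

// Scalarized v4f64 -> v4i32: one scalar convert plus one insert per lane.
// The insert chain's latency is why the table inflates the per-element
// cost by 1, giving 4 * 4 = 16 for the four lanes.
V4u fptoui4(const V4d &v) {
  V4u r{};
  for (int lane = 0; lane < 4; ++lane)
    r.e[lane] = static_cast<uint32_t>(v.e[lane]);
  return r;
}

With that cost in the table, 256-bit vectorization of the loop above is no longer deemed profitable, so the llc output should contain no vpinsrd chain.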