summaryrefslogtreecommitdiff
path: root/test/CodeGen/PowerPC
diff options
context:
space:
mode:
authorHal Finkel <hfinkel@anl.gov>2013-05-16 19:58:38 +0000
committerHal Finkel <hfinkel@anl.gov>2013-05-16 19:58:38 +0000
commitc482454e3cc2a33a2cf2d1cf0881c7c5e2641c80 (patch)
tree6880887689abe3c97fd79a896ab470b8da4090b7 /test/CodeGen/PowerPC
parent02e168003f45cf8e0a277c6b8c85c1a3032b1dec (diff)
downloadllvm-c482454e3cc2a33a2cf2d1cf0881c7c5e2641c80.tar.gz
llvm-c482454e3cc2a33a2cf2d1cf0881c7c5e2641c80.tar.bz2
llvm-c482454e3cc2a33a2cf2d1cf0881c7c5e2641c80.tar.xz
Create a new preheader in PPCCTRLoops to avoid counter register clobbers
Some IR-level instructions (such as FP <-> i64 conversions) are not chained w.r.t. the mtctr intrinsic and yet may become function calls that clobber the counter register. At the selection-DAG level, these might be reordered with the mtctr intrinsic causing miscompiles. To avoid this situation, if an existing preheader has instructions that might use the counter register, create a new preheader for the mtctr intrinsic. This extra block will be remerged with the old preheader at the MI level, but will prevent unwanted reordering at the selection-DAG level. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@182045 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/PowerPC')
-rw-r--r--test/CodeGen/PowerPC/ctrloop-fp64.ll32
1 files changed, 32 insertions, 0 deletions
diff --git a/test/CodeGen/PowerPC/ctrloop-fp64.ll b/test/CodeGen/PowerPC/ctrloop-fp64.ll
index 78974248da..313177c91e 100644
--- a/test/CodeGen/PowerPC/ctrloop-fp64.ll
+++ b/test/CodeGen/PowerPC/ctrloop-fp64.ll
@@ -26,3 +26,35 @@ for.end: ; preds = %for.body
; CHECK: @foo
; CHECK-NOT: mtctr
+@init_value = global double 1.000000e+00, align 8
+@data64 = global [8000 x i64] zeroinitializer, align 8
+
+define i32 @main(i32 %argc, i8** nocapture %argv) {
+entry:
+ %0 = load double* @init_value, align 8
+ %conv = fptosi double %0 to i64
+ %broadcast.splatinsert.i = insertelement <2 x i64> undef, i64 %conv, i32 0
+ %broadcast.splat.i = shufflevector <2 x i64> %broadcast.splatinsert.i, <2 x i64> undef, <2 x i32> zeroinitializer
+ br label %vector.body.i
+
+vector.body.i: ; preds = %vector.body.i, %entry
+ %index.i = phi i32 [ 0, %entry ], [ %index.next.i, %vector.body.i ]
+ %next.gep.i = getelementptr [8000 x i64]* @data64, i32 0, i32 %index.i
+ %1 = bitcast i64* %next.gep.i to <2 x i64>*
+ store <2 x i64> %broadcast.splat.i, <2 x i64>* %1, align 8
+ %next.gep.sum24.i = or i32 %index.i, 2
+ %2 = getelementptr [8000 x i64]* @data64, i32 0, i32 %next.gep.sum24.i
+ %3 = bitcast i64* %2 to <2 x i64>*
+ store <2 x i64> %broadcast.splat.i, <2 x i64>* %3, align 8
+ %index.next.i = add i32 %index.i, 4
+ %4 = icmp eq i32 %index.next.i, 8000
+ br i1 %4, label %_Z4fillIPxxEvT_S1_T0_.exit, label %vector.body.i
+
+_Z4fillIPxxEvT_S1_T0_.exit: ; preds = %vector.body.i
+ ret i32 0
+}
+
+; CHECK: @main
+; CHECK: __fixdfdi
+; CHECK: mtctr
+