author     Owen Anderson <resistor@mac.com>  2010-09-25 05:26:18 +0000
committer  Owen Anderson <resistor@mac.com>  2010-09-25 05:26:18 +0000
commit     b0ba0f4170dcfe1dbce17680c16cffce311e3ad8 (patch)
tree       716553f742e1467d69a529722534887435221692 /test/Transforms/GVN
parent     5981fc67883bfa74ac13625b05442b54ea7f6a1f (diff)
LoadPRE was not properly checking that the load it was PRE'ing post-dominated the block it was being hoisted to.
Splitting critical edges at the merge point only addressed part of the issue; it is also possible for non-post-domination to occur when the path from the load to the merge has branches in it. Unfortunately, full anticipation analysis is time-consuming, so for now we approximate it. This is strictly more conservative than real anticipation, so we will miss some cases that real PRE would allow, but we also no longer insert loads into paths where they didn't exist before. :-)

This is a very slight net positive on SPEC for me (0.5% on average). Most of the benchmarks are largely unaffected, but when it pays off it pays off decently: 181.mcf improves by 4.5% on my machine.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@114785 91177308-0d34-0410-b5e6-96231b3b80d8
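To illustrate the hazard the message describes, here is a minimal sketch of the kind of CFG involved. It is not part of this commit; the function and block names are hypothetical, and the IR uses the typed-pointer load syntax of this era. The load in %use does not post-dominate %merge, so inserting a copy of it into %right (to make the %use load fully redundant) would execute a load on the %right -> %merge -> %skip path, where the original program never touched @p at all.

; Hypothetical illustration only; not taken from the commit or its test.
@p = external global i32

define i32 @illustrate(i1 %a, i1 %b) nounwind {
entry:
  br i1 %a, label %left, label %right

left:                 ; loads @p on this path (result deliberately unused to keep the sketch small)
  %v1 = load i32* @p
  br label %merge

right:                ; never touches @p
  br label %merge

merge:                ; the load in %use is only partially redundant here
  br i1 %b, label %use, label %skip

use:                  ; does not post-dominate %merge
  %v2 = load i32* @p
  ret i32 %v2

skip:                 ; a path out of %merge with no load at all
  ret i32 0
}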
Diffstat (limited to 'test/Transforms/GVN')
-rw-r--r--  test/Transforms/GVN/pre-single-pred.ll | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/test/Transforms/GVN/pre-single-pred.ll b/test/Transforms/GVN/pre-single-pred.ll
index 706a16b7bd..f1f5c71a93 100644
--- a/test/Transforms/GVN/pre-single-pred.ll
+++ b/test/Transforms/GVN/pre-single-pred.ll
@@ -1,4 +1,13 @@
-; RUN: opt < %s -gvn -enable-load-pre -S | not grep {tmp3 = load}
+; RUN: opt < %s -gvn -enable-load-pre -S | FileCheck %s
+; This testcase assumed we'll PRE the load into %for.cond, but we don't actually
+; verify that doing so is safe. If there didn't _happen_ to be a load in
+; %for.end, we would actually be lengthening the execution on some paths, and
+; we were never actually checking that case. Now we actually do perform some
+; conservative checking to make sure we don't make paths longer, but we don't
+; currently get this case, which we got lucky on previously.
+;
+; Now that that faulty assumption is corrected, test that we DON'T incorrectly
+; hoist the load. Doing the right thing for the wrong reasons is still a bug.
 
 @p = external global i32
 define i32 @f(i32 %n) nounwind {
@@ -13,6 +22,8 @@ for.cond: ; preds = %for.inc, %entry
 for.cond.for.end_crit_edge: ; preds = %for.cond
 br label %for.end
 
+; CHECK: for.body:
+; CHECK-NEXT: %tmp3 = load i32* @p
 for.body: ; preds = %for.cond
 %tmp3 = load i32* @p ; <i32> [#uses=1]
 %dec = add i32 %tmp3, -1 ; <i32> [#uses=2]
@@ -20,6 +31,7 @@ for.body: ; preds = %for.cond
 %cmp6 = icmp slt i32 %dec, 0 ; <i1> [#uses=1]
 br i1 %cmp6, label %for.body.for.end_crit_edge, label %for.inc
 
+; CHECK: for.body.for.end_crit_edge:
 for.body.for.end_crit_edge: ; preds = %for.body
 br label %for.end
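For reference, the updated test can be exercised by hand the same way its RUN line does, substituting the in-tree path for %s (a sketch; it assumes opt and FileCheck from an LLVM build of this vintage are on PATH):

opt < test/Transforms/GVN/pre-single-pred.ll -gvn -enable-load-pre -S | FileCheck test/Transforms/GVN/pre-single-pred.ll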