author     Chris Lattner <sabre@nondot.org>    2011-01-09 02:28:48 +0000
committer  Chris Lattner <sabre@nondot.org>    2011-01-09 02:28:48 +0000
commit     6d5a2411aad379141df29687b8cec0938ab7ae61 (patch)
tree       7949687e72243113a25f85b2be1ce3efc56a3cd5 /test/Analysis
parent     f1859891b7bef1ac8d5cea100f152aeb5783c3b3 (diff)
teach SCEV analysis of PHI nodes that PHI recurrences formed
with GEP instructions are always NUW, because PHIs cannot wrap the end of the address space.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@123105 91177308-0d34-0410-b5e6-96231b3b80d8
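The rationale is worth spelling out: a GEP-based pointer induction variable only ever steps through a single allocated object, and a valid object cannot straddle the top of the address space, so the unsigned value of that pointer never wraps. Below is a minimal standalone C++ sketch of that invariant (it is not LLVM source; buffer, begin, and end are hypothetical names mirroring the @test2 loop added in this patch):

// Standalone illustration of the invariant this commit teaches SCEV (not LLVM
// source): a pointer recurrence such as {%begin,+,4} produced by a GEP cannot
// unsigned-wrap, because the object it walks must fit inside the address space.
// buffer/begin/end are hypothetical and mirror the @test2 loop below.
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t buffer[8] = {0};
  uint32_t *begin = buffer;
  uint32_t *end = buffer + 8;

  for (uint32_t *p = begin; p != end; ++p) {
    // The addrec step is sizeof(uint32_t) == 4, matching the "+,4" in the test.
    uintptr_t prev = reinterpret_cast<uintptr_t>(p);
    uintptr_t next = prev + sizeof(uint32_t);
    if (next < prev)                      // an unsigned wrap would show up here
      std::printf("wrapped around the address space\n");  // never hit for a valid object
    *p = 0;                               // the std::fill-style store from @test2
  }
  std::printf("no unsigned wrap: the recurrence is <nuw>\n");
  return 0;
}

The test change below leans on the same fact: the RUN line is switched from a grep count to FileCheck so the printed wrap flags can be verified per expression, and the new @test2 checks that the GEP-based pointer PHI is classified as {%begin,+,4}<nuw>, while the pointer formed through a sext'd i32 index in @test1 stays {%p,+,8} with no wrap flags.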
Diffstat (limited to 'test/Analysis')
-rw-r--r--  test/Analysis/ScalarEvolution/nsw.ll  | 42
1 file changed, 40 insertions(+), 2 deletions(-)
diff --git a/test/Analysis/ScalarEvolution/nsw.ll b/test/Analysis/ScalarEvolution/nsw.ll
index c4b48ee1c7..530ed39ac1 100644
--- a/test/Analysis/ScalarEvolution/nsw.ll
+++ b/test/Analysis/ScalarEvolution/nsw.ll
@@ -1,10 +1,11 @@
-; RUN: opt < %s -analyze -scalar-evolution | grep { --> {.*,+,.*}.*<%bb>} | count 8
+; RUN: opt < %s -analyze -scalar-evolution | FileCheck %s
; The addrecs in this loop are analyzable only by using nsw information.
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64"
-define void @foo(double* %p) nounwind {
+; CHECK: Classifying expressions for: @test1
+define void @test1(double* %p) nounwind {
entry:
%tmp = load double* %p, align 8 ; <double> [#uses=1]
%tmp1 = fcmp ogt double %tmp, 2.000000e+00 ; <i1> [#uses=1]
@@ -15,19 +16,29 @@ bb.nph: ; preds = %entry
bb: ; preds = %bb1, %bb.nph
%i.01 = phi i32 [ %tmp8, %bb1 ], [ 0, %bb.nph ] ; <i32> [#uses=3]
+; CHECK: %i.01
+; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%bb>
%tmp2 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
%tmp3 = getelementptr double* %p, i64 %tmp2 ; <double*> [#uses=1]
%tmp4 = load double* %tmp3, align 8 ; <double> [#uses=1]
%tmp5 = fmul double %tmp4, 9.200000e+00 ; <double> [#uses=1]
%tmp6 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
%tmp7 = getelementptr double* %p, i64 %tmp6 ; <double*> [#uses=1]
+; CHECK: %tmp7
+; CHECK-NEXT: --> {%p,+,8}<%bb>
store double %tmp5, double* %tmp7, align 8
%tmp8 = add nsw i32 %i.01, 1 ; <i32> [#uses=2]
+; CHECK: %tmp8
+; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%bb>
br label %bb1
bb1: ; preds = %bb
%phitmp = sext i32 %tmp8 to i64 ; <i64> [#uses=1]
+; CHECK: %phitmp
+; CHECK-NEXT: --> {1,+,1}<%bb>
%tmp9 = getelementptr double* %p, i64 %phitmp ; <double*> [#uses=1]
+; CHECK: %tmp9
+; CHECK-NEXT: --> {(8 + %p),+,8}<%bb>
%tmp10 = load double* %tmp9, align 8 ; <double> [#uses=1]
%tmp11 = fcmp ogt double %tmp10, 2.000000e+00 ; <i1> [#uses=1]
br i1 %tmp11, label %bb, label %bb1.return_crit_edge
@@ -38,3 +49,30 @@ bb1.return_crit_edge: ; preds = %bb1
return: ; preds = %bb1.return_crit_edge, %entry
ret void
}
+
+; CHECK: Classifying expressions for: @test2
+define void @test2(i32* %begin, i32* %end) ssp {
+entry:
+ %cmp1.i.i = icmp eq i32* %begin, %end
+ br i1 %cmp1.i.i, label %_ZSt4fillIPiiEvT_S1_RKT0_.exit, label %for.body.lr.ph.i.i
+
+for.body.lr.ph.i.i: ; preds = %entry
+ br label %for.body.i.i
+
+for.body.i.i: ; preds = %for.body.i.i, %for.body.lr.ph.i.i
+ %__first.addr.02.i.i = phi i32* [ %begin, %for.body.lr.ph.i.i ], [ %ptrincdec.i.i, %for.body.i.i ]
+; CHECK: %__first.addr.02.i.i
+; CHECK-NEXT: --> {%begin,+,4}<nuw><%for.body.i.i>
+ store i32 0, i32* %__first.addr.02.i.i, align 4
+ %ptrincdec.i.i = getelementptr inbounds i32* %__first.addr.02.i.i, i64 1
+; CHECK: %ptrincdec.i.i
+; CHECK-NEXT: --> {(4 + %begin),+,4}<nuw><%for.body.i.i>
+ %cmp.i.i = icmp eq i32* %ptrincdec.i.i, %end
+ br i1 %cmp.i.i, label %for.cond.for.end_crit_edge.i.i, label %for.body.i.i
+
+for.cond.for.end_crit_edge.i.i: ; preds = %for.body.i.i
+ br label %_ZSt4fillIPiiEvT_S1_RKT0_.exit
+
+_ZSt4fillIPiiEvT_S1_RKT0_.exit: ; preds = %entry, %for.cond.for.end_crit_edge.i.i
+ ret void
+}