path: root/test/Transforms/Inline
author    Dale Johannesen <dalej@apple.com>  2009-10-09 00:11:32 +0000
committer Dale Johannesen <dalej@apple.com>  2009-10-09 00:11:32 +0000
commit e91b9a3b59688023e20cee8441179300b87c844e (patch)
tree   fbc872069fb17e6e453ee94d1e307224bcd74835 /test/Transforms/Inline
parent 3645b01002e7ac244c1f3d163e5e350df21d869d (diff)
When considering whether to inline Callee into Caller, and doing so would make Caller too big to inline, see whether it might instead be better to inline Caller into its own callers. This situation is described in PR 2973, although I haven't tried the specific case in SPASS.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@83602 91177308-0d34-0410-b5e6-96231b3b80d8
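As a rough illustration of the heuristic described in the commit message, here is a minimal C++ sketch. It is not the actual LLVM Inliner code: the Function struct, the fixed InlineThreshold, and the helpers fitsAfterInlining/shouldInline are all illustrative assumptions standing in for the real cost model.

    #include <iostream>
    #include <string>
    #include <vector>

    // Illustrative stand-in for a call-graph node (assumption: the real pass
    // works on llvm::Function / call-graph SCCs, not this struct).
    struct Function {
      std::string Name;
      int Size;                        // rough instruction count
      std::vector<Function *> Callers; // functions that contain calls to this one
    };

    // Assumed fixed threshold; the real inliner derives a per-call-site cost.
    constexpr int InlineThreshold = 200;

    // Would Caller still be small enough to inline elsewhere after absorbing
    // Callee's body?
    bool fitsAfterInlining(const Function &Caller, const Function &Callee) {
      return Caller.Size + Callee.Size <= InlineThreshold;
    }

    // Decide whether to inline Callee into Caller right now.
    bool shouldInline(const Function &Caller, const Function &Callee) {
      // Caller stays under the threshold: inline as usual.
      if (fitsAfterInlining(Caller, Callee))
        return true;

      // Inlining Callee would make Caller too big to inline. If Caller could
      // currently be inlined into every one of its own callers, prefer to hold
      // off so Caller can be inlined upward instead (the PR 2973 situation).
      if (Caller.Callers.empty())
        return true; // nobody inlines Caller anyway
      for (const Function *Outer : Caller.Callers)
        if (!fitsAfterInlining(*Outer, Caller))
          return true; // Caller would not be inlined into this caller regardless
      return false;    // defer: keep Caller small enough to inline upward
    }

    int main() {
      Function Test{"test", 20, {}};
      Function Bar{"bar", 150, {&Test}};
      Function Foo{"foo", 150, {&Bar}};
      std::cout << "inline foo into bar? "
                << (shouldInline(Bar, Foo) ? "yes" : "no, keep bar inlinable")
                << "\n";
    }

With the assumed sizes in main, absorbing foo would push bar past the threshold while bar itself still fits into test, so the sketch defers. That mirrors the nested-inline.ll test below, where bar and bar2 are inlined into test and removed while foo survives as a separate function.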
Diffstat (limited to 'test/Transforms/Inline')
-rw-r--r--  test/Transforms/Inline/nested-inline.ll | 111
1 file changed, 111 insertions(+), 0 deletions(-)
diff --git a/test/Transforms/Inline/nested-inline.ll b/test/Transforms/Inline/nested-inline.ll
new file mode 100644
index 0000000000..1292667172
--- /dev/null
+++ b/test/Transforms/Inline/nested-inline.ll
@@ -0,0 +1,111 @@
+; RUN: opt < %s -inline -S | FileCheck %s
+; Test that bar and bar2 are both inlined throughout and removed.
+@A = weak global i32 0 ; <i32*> [#uses=1]
+@B = weak global i32 0 ; <i32*> [#uses=1]
+@C = weak global i32 0 ; <i32*> [#uses=1]
+
+define fastcc void @foo(i32 %X) {
+entry:
+; CHECK: @foo
+ %ALL = alloca i32, align 4 ; <i32*> [#uses=1]
+ %tmp1 = and i32 %X, 1 ; <i32> [#uses=1]
+ %tmp1.upgrd.1 = icmp eq i32 %tmp1, 0 ; <i1> [#uses=1]
+ br i1 %tmp1.upgrd.1, label %cond_next, label %cond_true
+
+cond_true: ; preds = %entry
+ store i32 1, i32* @A
+ br label %cond_next
+
+cond_next: ; preds = %cond_true, %entry
+ %tmp4 = and i32 %X, 2 ; <i32> [#uses=1]
+ %tmp4.upgrd.2 = icmp eq i32 %tmp4, 0 ; <i1> [#uses=1]
+ br i1 %tmp4.upgrd.2, label %cond_next7, label %cond_true5
+
+cond_true5: ; preds = %cond_next
+ store i32 1, i32* @B
+ br label %cond_next7
+
+cond_next7: ; preds = %cond_true5, %cond_next
+ %tmp10 = and i32 %X, 4 ; <i32> [#uses=1]
+ %tmp10.upgrd.3 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
+ br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
+
+cond_true11: ; preds = %cond_next7
+ store i32 1, i32* @C
+ br label %cond_next13
+
+cond_next13: ; preds = %cond_true11, %cond_next7
+ %tmp16 = and i32 %X, 8 ; <i32> [#uses=1]
+ %tmp16.upgrd.4 = icmp eq i32 %tmp16, 0 ; <i1> [#uses=1]
+ br i1 %tmp16.upgrd.4, label %UnifiedReturnBlock, label %cond_true17
+
+cond_true17: ; preds = %cond_next13
+ call void @ext( i32* %ALL )
+ ret void
+
+UnifiedReturnBlock: ; preds = %cond_next13
+ ret void
+}
+
+; CHECK-NOT: @bar
+define internal fastcc void @bar(i32 %X) {
+entry:
+ %ALL = alloca i32, align 4 ; <i32*> [#uses=1]
+ %tmp1 = and i32 %X, 1 ; <i32> [#uses=1]
+ %tmp1.upgrd.1 = icmp eq i32 %tmp1, 0 ; <i1> [#uses=1]
+ br i1 %tmp1.upgrd.1, label %cond_next, label %cond_true
+
+cond_true: ; preds = %entry
+ store i32 1, i32* @A
+ br label %cond_next
+
+cond_next: ; preds = %cond_true, %entry
+ %tmp4 = and i32 %X, 2 ; <i32> [#uses=1]
+ %tmp4.upgrd.2 = icmp eq i32 %tmp4, 0 ; <i1> [#uses=1]
+ br i1 %tmp4.upgrd.2, label %cond_next7, label %cond_true5
+
+cond_true5: ; preds = %cond_next
+ store i32 1, i32* @B
+ br label %cond_next7
+
+cond_next7: ; preds = %cond_true5, %cond_next
+ %tmp10 = and i32 %X, 4 ; <i32> [#uses=1]
+ %tmp10.upgrd.3 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
+ br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
+
+cond_true11: ; preds = %cond_next7
+ store i32 1, i32* @C
+ br label %cond_next13
+
+cond_next13: ; preds = %cond_true11, %cond_next7
+ %tmp16 = and i32 %X, 8 ; <i32> [#uses=1]
+ %tmp16.upgrd.4 = icmp eq i32 %tmp16, 0 ; <i1> [#uses=1]
+ br i1 %tmp16.upgrd.4, label %UnifiedReturnBlock, label %cond_true17
+
+cond_true17: ; preds = %cond_next13
+ call void @foo( i32 %X )
+ ret void
+
+UnifiedReturnBlock: ; preds = %cond_next13
+ ret void
+}
+
+define internal fastcc void @bar2(i32 %X) {
+entry:
+ call void @foo( i32 %X )
+ ret void
+}
+
+declare void @ext(i32*)
+
+define void @test(i32 %X) {
+entry:
+; CHECK: test
+; CHECK-NOT: @bar
+ tail call fastcc void @bar( i32 %X )
+ tail call fastcc void @bar( i32 %X )
+ tail call fastcc void @bar2( i32 %X )
+ tail call fastcc void @bar2( i32 %X )
+ ret void
+; CHECK: ret
+}