author    Chandler Carruth <chandlerc@gmail.com>  2011-11-27 09:22:53 +0000
committer Chandler Carruth <chandlerc@gmail.com>  2011-11-27 09:22:53 +0000
commit    2eb5a744b18d429928751b06e205cbb88f668ae7 (patch)
tree      a4f36f4f737e68b9478c51427132f21d90bfe620 /test/CodeGen/X86/block-placement.ll
parent    b5dd9de7240f3018dcd8ce84c158547a5e0f1131 (diff)
Rework a bit of the implementation of loop block rotation to not rely so
heavily on AnalyzeBranch. That routine doesn't behave as we want given
that rotation occurs mid-way through re-ordering the function. Instead,
merely check that there are no unanalyzable branching constructs present,
and then reason about the CFG via successor lists. This actually
simplifies my mental model for all of this as well.

The concrete result is that we now rotate more loop chains. I've added
a test case from Olden highlighting the effect. There is still a bit
more to do here in order to regain all of the performance in Olden.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@145179 91177308-0d34-0410-b5e6-96231b3b80d8
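As a rough sketch of what "reason about the CFG via successor lists" can look like (written against the current MachineBasicBlock/TargetInstrInfo API; hasOnlyAnalyzableBranches and findRotationPoint are hypothetical helper names for illustration, not the actual MachineBlockPlacement code):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineOperand.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"

    using namespace llvm;

    // Hypothetical helper: bail out only when analyzeBranch cannot classify
    // the block's terminator at all (it returns true on failure). Beyond
    // that, we do not trust its answers mid-way through re-ordering.
    static bool hasOnlyAnalyzableBranches(MachineBasicBlock &MBB,
                                          const TargetInstrInfo &TII) {
      MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
      SmallVector<MachineOperand, 4> Cond;
      return !TII.analyzeBranch(MBB, TBB, FBB, Cond);
    }

    // Hypothetical helper: reason about the loop purely via successor
    // lists, locating a latch (a block in the chain with an edge back to
    // the loop header) as a candidate point to rotate the chain to.
    static MachineBasicBlock *
    findRotationPoint(ArrayRef<MachineBasicBlock *> Chain,
                      MachineBasicBlock *Header) {
      for (MachineBasicBlock *MBB : Chain)
        for (MachineBasicBlock *Succ : MBB->successors())
          if (Succ == Header)
            return MBB; // Latch found; rotating here places it last.
      return nullptr;
    }

The test added below exercises the observable effect: after rotation, the compiled function should jump into the middle of the split first loop, with no branch between its two halves.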
Diffstat (limited to 'test/CodeGen/X86/block-placement.ll')
-rw-r--r--  test/CodeGen/X86/block-placement.ll  40
1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/test/CodeGen/X86/block-placement.ll b/test/CodeGen/X86/block-placement.ll
index 66def49e4b..52f7c3081c 100644
--- a/test/CodeGen/X86/block-placement.ll
+++ b/test/CodeGen/X86/block-placement.ll
@@ -199,6 +199,44 @@ exit:
ret i32 %base
}
+define void @test_loop_rotate_reversed_blocks() {
+; This test case (greatly reduced from an Olden benchmark) ensures that the loop
+; rotate implementation doesn't assume that loops are laid out in a particular
+; order. The first loop will get split into two basic blocks, with the loop
+; header coming after the loop latch.
+;
+; CHECK: test_loop_rotate_reversed_blocks
+; CHECK: %entry
+; Look for a jump into the middle of the loop, and no branches mid-way.
+; CHECK: jmp
+; CHECK: %loop1
+; CHECK-NOT: j{{\w*}} .LBB{{.*}}
+; CHECK: %loop1
+; CHECK: je
+
+entry:
+ %cond1 = load volatile i1* undef
+ br i1 %cond1, label %loop2.preheader, label %loop1
+
+loop1:
+ call i32 @f()
+ %cond2 = load volatile i1* undef
+ br i1 %cond2, label %loop2.preheader, label %loop1
+
+loop2.preheader:
+ call i32 @f()
+ %cond3 = load volatile i1* undef
+ br i1 %cond3, label %exit, label %loop2
+
+loop2:
+ call i32 @f()
+ %cond4 = load volatile i1* undef
+ br i1 %cond4, label %exit, label %loop2
+
+exit:
+ ret void
+}
+
define i32 @test_loop_align(i32 %i, i32* %a) {
; Check that we provide basic loop body alignment with the block placement
; pass.
@@ -567,8 +605,8 @@ define void @test_unnatural_cfg_backwards_inner_loop() {
; CHECK: test_unnatural_cfg_backwards_inner_loop
; CHECK: %entry
; CHECK: %body
-; CHECK: %loop1
; CHECK: %loop2b
+; CHECK: %loop1
; CHECK: %loop2a
entry: