author     Chris Lattner <sabre@nondot.org>    2011-02-13 22:25:43 +0000
committer  Chris Lattner <sabre@nondot.org>    2011-02-13 22:25:43 +0000
commit     0a9481f44fe4fc76e59109992940a76b2a3f9b3b (patch)
tree       58e330925b67825f38c827f416eb9dc2e5d9ee1e /test/CodeGen/X86/stack-align.ll
parent     eafbe659f8cd88584bef5f7ad2500b42227d02ab (diff)
Enhance ComputeMaskedBits to know that aligned frameindexes have their
low bits set to zero. This allows us to optimize out explicit stack
alignment code like in stack-align.ll:test4 when it is redundant.

Doing this causes the code generator to start turning FI+cst into FI|cst
all over the place, which is general goodness (that is the canonical form)
except that various pieces of the code generator don't handle OR
aggressively. Fix this by introducing a new
SelectionDAG::isBaseWithConstantOffset predicate, and using it in places
that are looking for ADD(X,CST).

The ARM backend in particular was missing a lot of addressing mode folding
opportunities around OR.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@125470 91177308-0d34-0410-b5e6-96231b3b80d8
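For context, a rough sketch of the predicate the message describes, assuming the
SelectionDAG APIs of this era (the real definition lives in
lib/CodeGen/SelectionDAG/SelectionDAG.cpp and may differ in detail): an OR with a
constant is treated like an ADD whenever the constant's bits are known to be zero
in the other operand, which is exactly the situation FI|cst creates.

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Sketch: Op is "base + constant offset" if it is ADD(X, C), or OR(X, C)
// where the OR behaves like an ADD because the bits of C are known to be
// zero in X (e.g. an aligned FrameIndex OR'd with a small offset).
bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
  if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
      !isa<ConstantSDNode>(Op.getOperand(1)))
    return false;
  if (Op.getOpcode() == ISD::OR &&
      !MaskedValueIsZero(Op.getOperand(0),
                         cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
    return false;
  return true;
}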
Diffstat (limited to 'test/CodeGen/X86/stack-align.ll')
-rw-r--r--  test/CodeGen/X86/stack-align.ll | 17
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/test/CodeGen/X86/stack-align.ll b/test/CodeGen/X86/stack-align.ll
index 8ca0b12b54..793c026712 100644
--- a/test/CodeGen/X86/stack-align.ll
+++ b/test/CodeGen/X86/stack-align.ll
@@ -7,7 +7,7 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
target triple = "i686-apple-darwin8"
@G = external global double
-define void @test({ double, double }* byval %z, double* %P) {
+define void @test({ double, double }* byval %z, double* %P) nounwind {
entry:
%tmp3 = load double* @G, align 16 ; <double> [#uses=1]
%tmp4 = tail call double @fabs( double %tmp3 ) ; <double> [#uses=1]
@@ -21,14 +21,14 @@ entry:
ret void
}
-define void @test2() alignstack(16) {
+define void @test2() alignstack(16) nounwind {
entry:
; CHECK: andl{{.*}}$-16, %esp
ret void
}
; Use a call to force a spill.
-define <2 x double> @test3(<2 x double> %x, <2 x double> %y) alignstack(32) {
+define <2 x double> @test3(<2 x double> %x, <2 x double> %y) alignstack(32) nounwind {
entry:
; CHECK: andl{{.*}}$-32, %esp
call void @test2()
@@ -38,3 +38,14 @@ entry:
declare double @fabs(double)
+; The pointer is already known aligned, so and x,-16 is eliminable.
+define i32 @test4() nounwind {
+entry:
+ %buffer = alloca [2048 x i8], align 16
+ %0 = ptrtoint [2048 x i8]* %buffer to i32
+ %and = and i32 %0, -16
+ ret i32 %and
+; CHECK: test4:
+; CHECK-NOT: and
+; CHECK: ret
+}
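To connect test4 back to the ComputeMaskedBits change: %buffer is a 16-byte
aligned alloca, so the low four bits of its address are already zero and the
and with -16 cannot change the value. A hedged illustration of that fact,
written here as a free helper for readability (the actual change is a case
inside SelectionDAG::ComputeMaskedBits, and this is not the verbatim patch):

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

// If the pointer's alignment can be inferred (16 for %buffer above), its low
// log2(align) bits are known to be zero.  Knowing that lets the combiner
// prove 'and i32 %0, -16' is a no-op and delete it, which is what the
// CHECK-NOT: and line in test4 verifies.
static APInt knownZeroLowBits(SelectionDAG &DAG, SDValue Ptr,
                              unsigned BitWidth) {
  if (unsigned Align = DAG.InferPtrAlignment(Ptr))
    return APInt::getLowBitsSet(BitWidth, Log2_32(Align));
  return APInt(BitWidth, 0);
}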