diff options
author | Hans Wennborg <hans@hanshq.net> | 2014-03-05 02:43:26 +0000 |
---|---|---|
committer | Hans Wennborg <hans@hanshq.net> | 2014-03-05 02:43:26 +0000 |
commit | 2f471c83a0bb19806053e2d784a97f153fbb8771 (patch) | |
tree | f886f798e01115292c0d04256abe2e65f85bf087 /test/CodeGen | |
parent | 3b9afd308728feb1853596bbb8ccd57cf5f6b3b9 (diff) | |
download | llvm-2f471c83a0bb19806053e2d784a97f153fbb8771.tar.gz llvm-2f471c83a0bb19806053e2d784a97f153fbb8771.tar.bz2 llvm-2f471c83a0bb19806053e2d784a97f153fbb8771.tar.xz |
Check for dynamic allocas and inline asm that clobbers sp before building
selection dag (PR19012)
In X86SelectionDagInfo::EmitTargetCodeForMemcpy we check with MachineFrameInfo
to make sure that ESI isn't used as a base pointer register before we choose to
emit rep movs (which clobbers esi).
The problem is that MachineFrameInfo wouldn't know about dynamic allocas or
inline asm that clobbers the stack pointer until SelectionDAGBuilder has
encountered them.
This patch fixes the problem by checking for such things when building the
FunctionLoweringInfo.
Differential Revision: http://llvm-reviews.chandlerc.com/D2954
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@202930 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen')
-rw-r--r-- | test/CodeGen/X86/inline-asm-sp-clobber-memcpy.ll | 17 | ||||
-rw-r--r-- | test/CodeGen/X86/stack-align-memcpy.ll | 27 |
2 files changed, 43 insertions, 1 deletion
diff --git a/test/CodeGen/X86/inline-asm-sp-clobber-memcpy.ll b/test/CodeGen/X86/inline-asm-sp-clobber-memcpy.ll new file mode 100644 index 0000000000..b55571bcba --- /dev/null +++ b/test/CodeGen/X86/inline-asm-sp-clobber-memcpy.ll @@ -0,0 +1,17 @@ +; RUN: llc < %s -force-align-stack -mtriple i386-apple-darwin -mcpu=i486 | FileCheck %s + +%struct.foo = type { [88 x i8] } + +declare void @bar(i8* nocapture, %struct.foo* align 4 byval) nounwind + +; PR19012 +; Don't clobber %esi if we have inline asm that clobbers %esp. +define void @test1(%struct.foo* nocapture %x, i32 %y, i8* %z) nounwind { + call void @bar(i8* %z, %struct.foo* align 4 byval %x) + call void asm sideeffect inteldialect "xor esp, esp", "=*m,~{flags},~{esp},~{esp},~{dirflag},~{fpsr},~{flags}"(i8* %z) + ret void + +; CHECK-LABEL: test1: +; CHECK: movl %esp, %esi +; CHECK-NOT: rep;movsl +} diff --git a/test/CodeGen/X86/stack-align-memcpy.ll b/test/CodeGen/X86/stack-align-memcpy.ll index 87bb85fad8..0cc3aa8488 100644 --- a/test/CodeGen/X86/stack-align-memcpy.ll +++ b/test/CodeGen/X86/stack-align-memcpy.ll @@ -2,6 +2,9 @@ %struct.foo = type { [88 x i8] } +declare void @bar(i8* nocapture, %struct.foo* align 4 byval) nounwind +declare void @baz(i8*) nounwind + ; PR15249 ; We can't use rep;movsl here because it clobbers the base pointer in %esi. define void @test1(%struct.foo* nocapture %x, i32 %y) nounwind { @@ -15,4 +18,26 @@ define void @test1(%struct.foo* nocapture %x, i32 %y) nounwind { ; CHECK-NOT: rep;movsl } -declare void @bar(i8* nocapture, %struct.foo* align 4 byval) nounwind +; PR19012 +; Also don't clobber %esi if the dynamic alloca comes after the memcpy. 
+define void @test2(%struct.foo* nocapture %x, i32 %y, i8* %z) nounwind { + call void @bar(i8* %z, %struct.foo* align 4 byval %x) + %dynalloc = alloca i8, i32 %y, align 1 + call void @baz(i8* %dynalloc) + ret void + +; CHECK-LABEL: test2: +; CHECK: movl %esp, %esi +; CHECK-NOT: rep;movsl +} + +; Check that we do use rep movs if we make the alloca static. +define void @test3(%struct.foo* nocapture %x, i32 %y, i8* %z) nounwind { + call void @bar(i8* %z, %struct.foo* align 4 byval %x) + %statalloc = alloca i8, i32 8, align 1 + call void @baz(i8* %statalloc) + ret void + +; CHECK-LABEL: test3: +; CHECK: rep;movsl +} |