author    Rafael Espindola <rafael.espindola@gmail.com>    2012-01-11 18:51:03 +0000
committer    Rafael Espindola <rafael.espindola@gmail.com>    2012-01-11 18:51:03 +0000
commit    7692ce9e810ed1707da46faf20c84f1ffd54bc55 (patch)
tree    ef97af9800622fd06bf59e75f862ad9f33ac4cc9    /test/CodeGen/X86/segmented-stacks-dynamic.ll
parent    25cd4ff97e882ad5039dec15b0c9be8fef062b6b (diff)
Split segmented stacks tests into tests for static- and dynamic-size frames.
Patch by Brian Anderson.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@147959 91177308-0d34-0410-b5e6-96231b3b80d8
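For context, the split is between frames whose alloca sizes are all compile-time constants (static) and frames with at least one run-time-sized alloca (dynamic), as in @test_basic below. A minimal IR sketch of the two shapes; the function names here are hypothetical and not part of the commit:

; Static-size frame: constant alloca count, so the frame size is fixed and
; only the __morestack prologue check is needed.
define i32 @static_frame_sketch() {
  %buf = alloca i32, i32 16
  ret i32 0
}

; Dynamic-size frame: the count comes from an argument, so the lowering must
; also grow the stack at the alloca site (the __morestack_allocate_stack_space
; calls checked in the new test).
define i32 @dynamic_frame_sketch(i32 %n) {
  %buf = alloca i32, i32 %n
  ret i32 0
}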
Diffstat (limited to 'test/CodeGen/X86/segmented-stacks-dynamic.ll')
-rw-r--r--    test/CodeGen/X86/segmented-stacks-dynamic.ll    64
1 file changed, 64 insertions, 0 deletions
diff --git a/test/CodeGen/X86/segmented-stacks-dynamic.ll b/test/CodeGen/X86/segmented-stacks-dynamic.ll
new file mode 100644
index 0000000000..103551b91a
--- /dev/null
+++ b/test/CodeGen/X86/segmented-stacks-dynamic.ll
@@ -0,0 +1,64 @@
+; RUN: llc < %s -mtriple=i686-linux -segmented-stacks -verify-machineinstrs | FileCheck %s -check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-linux -segmented-stacks -verify-machineinstrs | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=i686-linux -segmented-stacks -filetype=obj
+; RUN: llc < %s -mtriple=x86_64-linux -segmented-stacks -filetype=obj
+
+; Just to prevent the alloca from being optimized away
+declare void @dummy_use(i32*, i32)
+
+define i32 @test_basic(i32 %l) {
+ %mem = alloca i32, i32 %l
+ call void @dummy_use (i32* %mem, i32 %l)
+ %terminate = icmp eq i32 %l, 0
+ br i1 %terminate, label %true, label %false
+
+true:
+ ret i32 0
+
+false:
+ %newlen = sub i32 %l, 1
+ %retvalue = call i32 @test_basic(i32 %newlen)
+ ret i32 %retvalue
+
+; X32: test_basic:
+
+; X32: cmpl %gs:48, %esp
+; X32-NEXT: ja .LBB0_2
+
+; X32: pushl $4
+; X32-NEXT: pushl $12
+; X32-NEXT: calll __morestack
+; X32-NEXT: ret
+
+; X32: movl %esp, %eax
+; X32-NEXT: subl %ecx, %eax
+; X32-NEXT: cmpl %eax, %gs:48
+
+; X32: movl %eax, %esp
+
+; X32: subl $12, %esp
+; X32-NEXT: pushl %ecx
+; X32-NEXT: calll __morestack_allocate_stack_space
+; X32-NEXT: addl $16, %esp
+
+; X64: test_basic:
+
+; X64: cmpq %fs:112, %rsp
+; X64-NEXT: ja .LBB0_2
+
+; X64: movabsq $24, %r10
+; X64-NEXT: movabsq $0, %r11
+; X64-NEXT: callq __morestack
+; X64-NEXT: ret
+
+; X64: movq %rsp, %rdi
+; X64-NEXT: subq %rax, %rdi
+; X64-NEXT: cmpq %rdi, %fs:112
+
+; X64: movq %rdi, %rsp
+
+; X64: movq %rax, %rdi
+; X64-NEXT: callq __morestack_allocate_stack_space
+; X64-NEXT: movq %rax, %rdi
+
+}
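
The RUN lines above are lit commands with %s substituted for the test file; outside of lit the same 32-bit check can be driven by hand, assuming the command is run from the root of the source tree where the file lives:

llc < test/CodeGen/X86/segmented-stacks-dynamic.ll -mtriple=i686-linux -segmented-stacks -verify-machineinstrs | FileCheck test/CodeGen/X86/segmented-stacks-dynamic.ll -check-prefix=X32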