1 ; RUN: llc < %s -mtriple=i686-linux -segmented-stacks -verify-machineinstrs | FileCheck %s -check-prefix=X32
2 ; RUN: llc < %s -mtriple=x86_64-linux -segmented-stacks -verify-machineinstrs | FileCheck %s -check-prefix=X64
4 ; Just to prevent the alloca from being optimized away
; dummy_use is an external (opaque) sink: passing %mem and %l to it keeps
; the dynamic alloca observable, so the optimizer cannot delete it.
5 declare void @dummy_use(i32*, i32)
; i32 @test_basic(i32 %l)
; Recursive function that allocates %l i32s on the stack in every frame,
; so each call grows the stack by a runtime-determined amount. This
; exercises both the segmented-stack function prologue (__morestack) and
; the dynamic-alloca slow path (__morestack_allocate_stack_space).
; NOTE(review): interior lines of this function (the %true/%false label
; bodies, ret, and closing brace) are elided in this excerpt; the CHECK
; lines below were written against the complete original test.
7 define i32 @test_basic(i32 %l) {
8 %mem = alloca i32, i32 %l
9 call void @dummy_use (i32* %mem, i32 %l)
10 %terminate = icmp eq i32 %l, 0
11 br i1 %terminate, label %true, label %false
17 %newlen = sub i32 %l, 1
18 %retvalue = call i32 @test_basic(i32 %newlen)
; --- X32 prologue: compute the would-be stack pointer and compare it
; against the thread-local stack limit at %gs:48; on overflow, call the
; __morestack runtime to get a new stack segment.
23 ; X32: leal -12(%esp), %ecx
24 ; X32-NEXT: cmpl %gs:48, %ecx
29 ; X32-NEXT: calll __morestack
30 ; X32-NEXT: addl $8, %esp
; --- X32 dynamic alloca: test whether the requested block still fits in
; the current segment (esp - size vs. %gs:48); if it fits, just move esp,
; otherwise fall back to the runtime allocator.
33 ; X32: movl %esp, %eax
34 ; X32-NEXT: subl %ecx, %eax
35 ; X32-NEXT: cmpl %eax, %gs:48
37 ; X32: movl %eax, %esp
40 ; X32-NEXT: pushl %ecx
41 ; X32-NEXT: calll __morestack_allocate_stack_space
42 ; X32-NEXT: addl $16, %esp
; --- X64 prologue: stack limit lives at %fs:112; the frame size (24)
; and argument-area size (0) are passed to __morestack in %r10/%r11.
46 ; X64: leaq -24(%rsp), %r11
47 ; X64-NEXT: cmpq %fs:112, %r11
49 ; X64: movabsq $24, %r10
50 ; X64-NEXT: movabsq $0, %r11
51 ; X64-NEXT: callq __morestack
; --- X64 dynamic alloca: same fit-check pattern; the slow path passes
; the size in %rdi and receives the new block's address back in %rax.
54 ; X64: movq %rsp, %rdi
55 ; X64-NEXT: subq %rax, %rdi
56 ; X64-NEXT: cmpq %rdi, %fs:112
58 ; X64: movq %rdi, %rsp
60 ; X64: movq %rax, %rdi
61 ; X64-NEXT: callq __morestack_allocate_stack_space
62 ; X64-NEXT: movq %rax, %rdi
; i32 @test_nested(i32* nest %closure, i32 %other)
; The 'nest' parameter is passed in the static-chain register, which the
; segmented-stack prologue must not clobber. The checks confirm this:
; on X32 the limit compare uses %edx (not %ecx, as test_basic does),
; and on X64 the %r10 chain value is parked in %rax across the
; __morestack call (which needs %r10/%r11 for its own arguments) and
; restored afterwards.
; NOTE(review): the function's ret and closing brace are elided in this
; excerpt; the CHECK lines were written against the complete original test.
66 define i32 @test_nested(i32 * nest %closure, i32 %other) {
67 %addend = load i32 * %closure
68 %result = add i32 %other, %addend
; --- X32 prologue: zero-sized frame (leal (%esp)), limit at %gs:48.
71 ; X32: leal (%esp), %edx
72 ; X32-NEXT: cmpl %gs:48, %edx
78 ; X32-NEXT: calll __morestack
79 ; X32-NEXT: addl $8, %esp
; --- X64 prologue: save the static chain, pass frame/arg sizes of 0,
; then restore the chain into %r10 after __morestack returns.
82 ; X64: leaq (%rsp), %r11
83 ; X64-NEXT: cmpq %fs:112, %r11
85 ; X64: movq %r10, %rax
86 ; X64-NEXT: movabsq $0, %r10
87 ; X64-NEXT: movabsq $0, %r11
88 ; X64-NEXT: callq __morestack
90 ; X64-NEXT: movq %rax, %r10