1 ; RUN: llc < %s -mtriple=i686-linux -segmented-stacks | FileCheck %s -check-prefix=X32
2 ; RUN: llc < %s -mtriple=x86_64-linux -segmented-stacks | FileCheck %s -check-prefix=X64
4 ; X86FrameLowering::adjustForSegmentedStacks is inserting code after a RET.
7 ; Just to prevent the alloca from being optimized away
; External declaration (no body): calling it makes %mem and %l escape, so the
; dynamic alloca in the tests below cannot be folded away by the optimizer.
8 declare void @dummy_use(i32*, i32)
10 define i32 @test_basic(i32 %l) {
11 %mem = alloca i32, i32 %l
12 call void @dummy_use (i32* %mem, i32 %l)
13 %terminate = icmp eq i32 %l, 0
14 br i1 %terminate, label %true, label %false
20 %newlen = sub i32 %l, 1
21 %retvalue = call i32 @test_basic(i32 %newlen)
26 ; X32: leal -12(%esp), %ecx
27 ; X32-NEXT: cmpl %gs:48, %ecx
32 ; X32-NEXT: calll __morestack
33 ; X32-NEXT: addl $8, %esp
36 ; X32: movl %eax, %esp
39 ; X32-NEXT: pushl %ecx
40 ; X32-NEXT: calll __morestack_allocate_stack_space
41 ; X32-NEXT: addl $16, %esp
45 ; X64: leaq -24(%rsp), %r11
46 ; X64-NEXT: cmpq %fs:112, %r11
48 ; X64: movabsq $24, %r10
49 ; X64-NEXT: movabsq $0, %r11
50 ; X64-NEXT: callq __morestack
53 ; X64: movq %rsp, %rax
54 ; X64-NEXT: subq %rcx, %rax
55 ; X64-NEXT: cmpq %rax, %fs:112
57 ; X64: movq %rax, %rsp
59 ; X64: movq %rcx, %rdi
60 ; X64-NEXT: callq __morestack_allocate_stack_space
64 define i32 @test_nested(i32 * nest %closure, i32 %other) {
65 %addend = load i32 * %closure
66 %result = add i32 %other, %addend
69 ; X32: leal (%esp), %edx
70 ; X32-NEXT: cmpl %gs:48, %edx
76 ; X32-NEXT: calll __morestack
77 ; X32-NEXT: addl $8, %esp
80 ; X64: leaq (%rsp), %r11
81 ; X64-NEXT: cmpq %fs:112, %r11
83 ; X64: movq %r10, %rax
84 ; X64-NEXT: movabsq $0, %r10
85 ; X64-NEXT: movabsq $0, %r11
86 ; X64-NEXT: callq __morestack
88 ; X64-NEXT: movq %rax, %r10