1 ; RUN: llc < %s -march=x86-64 -mattr=+avx -mtriple=i686-apple-darwin10 | FileCheck %s
4 ; no VLAs or dynamic alignment
; t1: one scalar alloca with only 4-byte alignment and no VLA, so the
; stack needs no dynamic realignment.  The CHECK-NOT below asserts that
; llc emits no "andq $-N, %rsp" realignment, and that the local is
; addressed directly off %rsp (frameless access, same [[OFFSET]] for the
; leaq that forms the argument and the movl that reloads it).
; NOTE(review): the function body appears elided in this excerpt — the
; entry label, "ret", and closing "}" are not visible; code left as-is.
5 define i32 @t1() nounwind uwtable ssp {
7 %a = alloca i32, align 4
8 call void @t1_helper(i32* %a) nounwind
9 %0 = load i32* %a, align 4
10 %add = add nsw i32 %0, 13
14 ; CHECK-NOT: andq $-{{[0-9]+}}, %rsp
15 ; CHECK: leaq [[OFFSET:[0-9]*]](%rsp), %rdi
16 ; CHECK: callq _t1_helper
17 ; CHECK: movl [[OFFSET]](%rsp), %eax
18 ; CHECK: addl $13, %eax
21 declare void @t1_helper(i32*)
; t2: a <8 x float> alloca with 32-byte alignment exceeds the ABI stack
; alignment, so the prologue must realign: set up a frame pointer
; (movq %rsp, %rbp), force 32-byte alignment (andq $-32, %rsp), then
; allocate locals (subq).  With no VLA, locals are addressed off the
; realigned %rsp, and the epilogue restores via movq %rbp, %rsp.
; NOTE(review): entry label / ret / closing brace are elided in this
; excerpt; code left byte-identical.
24 define i32 @t2() nounwind uwtable ssp {
26 %a = alloca i32, align 4
27 %v = alloca <8 x float>, align 32
28 call void @t2_helper(i32* %a, <8 x float>* %v) nounwind
29 %0 = load i32* %a, align 4
30 %add = add nsw i32 %0, 13
35 ; CHECK: movq %rsp, %rbp
36 ; CHECK: andq $-32, %rsp
37 ; CHECK: subq ${{[0-9]+}}, %rsp
39 ; CHECK: leaq {{[0-9]*}}(%rsp), %rdi
40 ; CHECK: leaq {{[0-9]*}}(%rsp), %rsi
41 ; CHECK: callq _t2_helper
43 ; CHECK: movq %rbp, %rsp
47 declare void @t2_helper(i32*, <8 x float>*)
; t3: a VLA (runtime-sized alloca) but nothing aligned beyond 16 bytes,
; which the ABI stack alignment already guarantees — so a frame pointer
; is required for the variable-size frame, but no dynamic realignment:
; the CHECK-NOT asserts no "andq $-N, %rsp", and the epilogue recovers
; %rsp from the frame pointer (leaq -N(%rbp), %rsp).
; NOTE(review): entry label / ret / closing brace are elided in this
; excerpt; code left byte-identical.
50 define i32 @t3(i64 %sz) nounwind uwtable ssp {
52 %a = alloca i32, align 4
53 %vla = alloca i32, i64 %sz, align 16
54 call void @t3_helper(i32* %a, i32* %vla) nounwind
55 %0 = load i32* %a, align 4
56 %add = add nsw i32 %0, 13
61 ; CHECK: movq %rsp, %rbp
63 ; CHECK-NOT: andq $-{{[0-9]+}}, %rsp
64 ; CHECK: subq ${{[0-9]+}}, %rsp
66 ; CHECK: leaq -{{[0-9]+}}(%rbp), %rsp
71 declare void @t3_helper(i32*, i32*)
73 ; VLAs + Dynamic realignment
; t4: combines a VLA with a 32-byte-aligned vector alloca.  The VLA
; moves %rsp at runtime, and realignment makes %rbp-relative offsets
; unusable for the realigned area, so a third register is needed as a
; base pointer: after andq $-32 and the (named, reused) subq [[STACKADJ]]
; adjustment, %rbx snapshots %rsp and the fixed locals are addressed
; off %rbx.  The matching addq $[[STACKADJ]] and movq %rbp, %rsp unwind
; in the epilogue.
; NOTE(review): entry label / ret / closing brace are elided in this
; excerpt; code left byte-identical.
74 define i32 @t4(i64 %sz) nounwind uwtable ssp {
76 %a = alloca i32, align 4
77 %v = alloca <8 x float>, align 32
78 %vla = alloca i32, i64 %sz, align 16
79 call void @t4_helper(i32* %a, i32* %vla, <8 x float>* %v) nounwind
80 %0 = load i32* %a, align 4
81 %add = add nsw i32 %0, 13
86 ; CHECK: movq %rsp, %rbp
87 ; CHECK: andq $-32, %rsp
90 ; CHECK: subq $[[STACKADJ:[0-9]+]], %rsp
91 ; CHECK: movq %rsp, %rbx
93 ; CHECK: leaq {{[0-9]*}}(%rbx), %rdi
94 ; CHECK: leaq {{[0-9]*}}(%rbx), %rdx
95 ; CHECK: callq _t4_helper
97 ; CHECK: addq $[[STACKADJ]], %rsp
100 ; CHECK: movq %rbp, %rsp
104 declare void @t4_helper(i32*, i32*, <8 x float>*)
106 ; Dynamic realignment + Spill
; t5: a 32-byte <8 x float> value is loaded and must survive across the
; t5_helper1 call (ymm registers are caller-saved), forcing a spill.
; The checks verify the frame is realigned to 32 bytes first
; (andq $-32, %rsp) so the aligned vmovaps spill/reload to (%rsp) is
; legal, that the value is reloaded into %ymm0 (the vector argument
; register) for t5_helper2, and that the scalar result is read off the
; realigned %rsp.
; NOTE(review): entry label / ret / closing brace are elided in this
; excerpt; code left byte-identical.
107 define i32 @t5(float* nocapture %f) nounwind uwtable ssp {
109 %a = alloca i32, align 4
110 %0 = bitcast float* %f to <8 x float>*
111 %1 = load <8 x float>* %0, align 32
112 call void @t5_helper1(i32* %a) nounwind
113 call void @t5_helper2(<8 x float> %1) nounwind
114 %2 = load i32* %a, align 4
115 %add = add nsw i32 %2, 13
120 ; CHECK: movq %rsp, %rbp
121 ; CHECK: andq $-32, %rsp
122 ; CHECK: subq ${{[0-9]+}}, %rsp
124 ; CHECK: vmovaps (%rdi), [[AVXREG:%ymm[0-9]+]]
125 ; CHECK: vmovaps [[AVXREG]], (%rsp)
126 ; CHECK: leaq {{[0-9]+}}(%rsp), %rdi
127 ; CHECK: callq _t5_helper1
128 ; CHECK: vmovaps (%rsp), %ymm0
129 ; CHECK: callq _t5_helper2
130 ; CHECK: movl {{[0-9]+}}(%rsp), %eax
132 ; CHECK: movq %rbp, %rsp
136 declare void @t5_helper1(i32*)
138 declare void @t5_helper2(<8 x float>)
140 ; VLAs + Dynamic realignment + Spill
141 ; FIXME: RA has already reserved RBX, so we can't do dynamic realignment.
; t6: the hardest combination — a VLA (needs a base pointer), dynamic
; realignment (needs %rsp freed from addressing), and a 32-byte vector
; live across calls (needs a spill slot).  Per the FIXME above, the
; base-pointer register RBX is already reserved, so this case is not yet
; handled; accordingly no CHECK lines are present for this function in
; this excerpt — it only verifies compilation does not crash.
; NOTE(review): entry label / ret / closing brace are elided in this
; excerpt; code left byte-identical.
142 define i32 @t6(i64 %sz, float* nocapture %f) nounwind uwtable ssp {
145 %a = alloca i32, align 4
146 %0 = bitcast float* %f to <8 x float>*
147 %1 = load <8 x float>* %0, align 32
148 %vla = alloca i32, i64 %sz, align 16
149 call void @t6_helper1(i32* %a, i32* %vla) nounwind
150 call void @t6_helper2(<8 x float> %1) nounwind
151 %2 = load i32* %a, align 4
152 %add = add nsw i32 %2, 13
156 declare void @t6_helper1(i32*, i32*)
158 declare void @t6_helper2(<8 x float>)