1 ; RUN: llc < %s -mattr=-avx -fast-isel -mcpu=core2 -O0 -regalloc=fast -asm-verbose=0 -fast-isel-abort | FileCheck %s
2 ; RUN: llc < %s -mattr=+avx -fast-isel -mcpu=core2 -O0 -regalloc=fast -asm-verbose=0 -fast-isel-abort | FileCheck %s --check-prefix=AVX
4 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
5 target triple = "x86_64-apple-darwin10.0.0"
7 ; Make sure that fast-isel folds the immediate into the binop even though it
; NOTE(review): test1's body (and the rest of this purpose comment) is elided
; in this excerpt — confirm intent against the full file before editing.
9 define i32 @test1(i32 %i) nounwind ssp {
18 ; rdar://9289512 - The load should fold into the compare.
19 define void @test2(i64 %x) nounwind ssp {
; %x is spilled to a stack slot, reloaded, and compared sgt against 42;
; the reload is expected to fold into the cmpq below (no separate movq load).
21 %x.addr = alloca i64, align 8
22 store i64 %x, i64* %x.addr, align 8
23 %tmp = load i64* %x.addr, align 8
24 %cmp = icmp sgt i64 %tmp, 42
25 br i1 %cmp, label %if.then, label %if.end
27 if.then: ; preds = %entry
30 if.end: ; preds = %if.then, %entry
; Expected asm: store arg to -8(%rsp), then compare the immediate directly
; against that memory operand.
33 ; CHECK: movq %rdi, -8(%rsp)
34 ; CHECK: cmpq $42, -8(%rsp)
; External global whose address is taken below; on this Darwin triple it is
; reached through the GOT (note the leading underscore mangling in the check).
40 @G = external global i32
41 define i64 @test3() nounwind {
42 %A = ptrtoint i32* @G to i64
45 ; CHECK: movq _G@GOTPCREL(%rip), %rax
52 @rtx_length = external global [153 x i8]
54 define i32 @test4(i64 %idxprom9) nounwind {
; Indexed byte load from an external array: the GOT load supplies the base,
; and the index (%rdi) is folded into the zero-extending load's address mode.
55 %arrayidx10 = getelementptr inbounds [153 x i8]* @rtx_length, i32 0, i64 %idxprom9
56 %tmp11 = load i8* %arrayidx10, align 1
57 %conv = zext i8 %tmp11 to i32
61 ; CHECK: movq _rtx_length@GOTPCREL(%rip), %rax
62 ; CHECK-NEXT: movzbl (%rax,%rdi), %eax
67 ; PR3242 - Out of range shifts should not be folded by fastisel.
68 define void @test5(i32 %x, i32* %p) nounwind {
; Shift amount 50000 exceeds the i32 bit width, so it must not be folded as
; an immediate shift; it is materialized in %ecx and a variable sarl is used.
69 %y = ashr i32 %x, 50000
74 ; CHECK: movl $50000, %ecx
75 ; CHECK: sarl %cl, %edi
79 ; rdar://9289501 - fast isel should fold trivial multiplies to shifts.
80 define i64 @test6(i64 %x) nounwind ssp {
; mul by 8 (a power of two) should lower to a left shift by 3.
82 %mul = mul nsw i64 %x, 8
86 ; CHECK: shlq $3, %rdi
; 32-bit variant of test6: mul-by-8 becomes shll $3.
89 define i32 @test7(i32 %x) nounwind ssp {
91 %mul = mul nsw i32 %x, 8
94 ; CHECK: shll $3, %edi
98 ; rdar://9289507 - folding of immediates into 64-bit operations.
99 define i64 @test8(i64 %x) nounwind ssp {
; The constant 7 should be folded directly into the 64-bit add.
101 %add = add nsw i64 %x, 7
104 ; CHECK-LABEL: test8:
105 ; CHECK: addq $7, %rdi
; Non-power-of-two 64-bit multiply: the immediate folds into three-operand imulq.
; NOTE(review): the result of this mul is named %add — misleading but harmless;
; renaming would require touching elided lines that use it.
108 define i64 @test9(i64 %x) nounwind ssp {
110 %add = mul nsw i64 %x, 7
112 ; CHECK-LABEL: test9:
113 ; CHECK: imulq $7, %rdi, %rax
116 ; rdar://9297011 - Don't reject udiv by a power of 2.
; NOTE(review): the udiv body and expected-asm lines are elided in this excerpt.
117 define i32 @test10(i32 %X) nounwind {
120 ; CHECK-LABEL: test10:
; Companion to test10: exact sdiv by a power of 2 (expected-asm lines elided here).
124 define i32 @test11(i32 %X) nounwind {
125 %Y = sdiv exact i32 %X, 8
127 ; CHECK-LABEL: test11:
132 ; rdar://9297006 - Trunc to bool.
133 define void @test12(i8 %tmp) nounwind ssp noredzone {
; trunc i8 -> i1 used directly as a branch condition; fast-isel must handle it
; instead of aborting (the RUN lines use -fast-isel-abort).
135 %tobool = trunc i8 %tmp to i1
136 br i1 %tobool, label %if.then, label %if.end
; Recursive call passing constant 0: expect the zeroing xorl idiom for %edi.
138 if.then: ; preds = %entry
139 call void @test12(i8 0) noredzone
142 if.end: ; preds = %if.then, %entry
144 ; CHECK-LABEL: test12:
147 ; CHECK-NEXT: xorl %edi, %edi
; Callee taking a bare i1; test13 passes constant false.
151 declare void @test13f(i1 %X)
153 define void @test13() nounwind {
; Constant i1 0 argument should be materialized with the xor zeroing idiom.
154 call void @test13f(i1 0)
156 ; CHECK-LABEL: test13:
157 ; CHECK: xorl %edi, %edi
163 ; rdar://9297003 - fast isel bails out on all functions taking bools
164 define void @test14(i8 %tmp) nounwind ssp noredzone {
; Truncated i1 passed zeroext to @test13f; fast-isel must not bail
; (expected-asm lines for the body are elided in this excerpt).
166 %tobool = trunc i8 %tmp to i1
167 call void @test13f(i1 zeroext %tobool) noredzone
169 ; CHECK-LABEL: test14:
; Old-style (pre-3.7) memcpy intrinsic signature with align + volatile params.
174 declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)
176 ; rdar://9289488 - fast-isel shouldn't bail out on llvm.memcpy
177 define void @test15(i8* %a, i8* %b) nounwind {
; A 4-byte aligned memcpy should lower to a single 32-bit load/store pair.
178 call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 4, i32 4, i1 false)
180 ; CHECK-LABEL: test15:
181 ; CHECK-NEXT: movl (%rsi), %eax
182 ; CHECK-NEXT: movl %eax, (%rdi)
186 ; Handling for varargs calls
187 declare void @test16callee(...) nounwind
188 define void @test16() nounwind {
; SysV varargs: %al carries the number of vector registers used.
; Integer-only call => movb $0, %al; the double call below => movb $1, %al.
189 ; CHECK-LABEL: test16:
190 ; CHECK: movl $1, %edi
191 ; CHECK: movb $0, %al
192 ; CHECK: callq _test16callee
193 call void (...)* @test16callee(i32 1)
; FP constant comes from a constant-pool load; the AVX prefix expects the
; v-prefixed vmovsd form instead.
197 ; CHECK: movsd LCP{{.*}}_{{.*}}(%rip), %xmm0
198 ; CHECK: movb $1, %al
199 ; CHECK: callq _test16callee
201 ; AVX: vmovsd LCP{{.*}}_{{.*}}(%rip), %xmm0
203 ; AVX: callq _test16callee
204 call void (...)* @test16callee(double 1.000000e+00)
209 declare void @foo() unnamed_addr ssp align 2
211 ; Verify that we don't fold the load into the compare here. That would move it
; NOTE(review): the remainder of this comment and the %tmp load line are elided
; in this excerpt — the checks require a separate movl load before the cmpl.
213 define i32 @test17(i32 *%P) ssp nounwind {
216 %cmp = icmp ne i32 %tmp, 5
218 br i1 %cmp, label %if.then, label %if.else
220 if.then: ; preds = %entry
223 if.else: ; preds = %entry
225 ; CHECK-LABEL: test17:
226 ; CHECK: movl (%rdi), %eax
228 ; CHECK: cmpl $5, %eax
232 ; Check that 0.0 is materialized using xorps
; (expected-asm line for the xorps is elided in this excerpt).
233 define void @test18(float* %p1) {
234 store float 0.0, float* %p1
236 ; CHECK-LABEL: test18:
240 ; Without any type hints, doubles use the smaller xorps instead of xorpd.
; (expected-asm line for the xorps is elided in this excerpt).
241 define void @test19(double* %p1) {
242 store double 0.0, double* %p1
244 ; CHECK-LABEL: test19:
248 ; Check that we fast-isel sret
249 %struct.a = type { i64, i64, i64 }
250 define void @test20() nounwind ssp {
; The sret argument is the address of the local alloca, passed in %rdi
; (hidden first argument per the SysV struct-return convention).
252 %tmp = alloca %struct.a, align 8
253 call void @test20sret(%struct.a* sret %tmp)
255 ; CHECK-LABEL: test20:
256 ; CHECK: leaq (%rsp), %rdi
257 ; CHECK: callq _test20sret
259 declare void @test20sret(%struct.a* sret)
261 ; Check that -0.0 is not materialized using xor
; (negative zero has a set sign bit, so a zeroing xor would be wrong;
; the expected-asm lines are elided in this excerpt).
262 define void @test21(double* %p1) {
263 store double -0.0, double* %p1
265 ; CHECK-LABEL: test21:
270 ; Check that immediate arguments to a function
271 ; do not cause massive spilling and are used
272 ; as immediates just before the call.
273 define void @test22() nounwind {
; Four calls with distinct constants: each immediate should be set into %edi
; right before its call (0 via the xor idiom) rather than spilled/reloaded.
275 call void @foo22(i32 0)
276 call void @foo22(i32 1)
277 call void @foo22(i32 2)
278 call void @foo22(i32 3)
280 ; CHECK-LABEL: test22:
281 ; CHECK: xorl %edi, %edi
282 ; CHECK: callq _foo22
283 ; CHECK: movl $1, %edi
284 ; CHECK: callq _foo22
285 ; CHECK: movl $2, %edi
286 ; CHECK: callq _foo22
287 ; CHECK: movl $3, %edi
288 ; CHECK: callq _foo22
291 declare void @foo22(i32)
; PR-style sret check: the sret pointer (%rdi) must be returned in %rax.
; NOTE(review): this function continues past the end of this excerpt
; (@foo23 declaration and the ret are not visible).
294 define void @test23(i8* noalias sret %result) {
296 %b = call i8* @foo23()
298 ; CHECK-LABEL: test23:
300 ; CHECK: movq %rdi, %rax