1 ; RUN: llc < %s -mattr=+sse2 -mtriple=i686-apple-darwin -mcpu=core2 | FileCheck %s -check-prefix=SSE2-Darwin
2 ; RUN: llc < %s -mattr=+sse2 -mtriple=i686-pc-mingw32 -mcpu=core2 | FileCheck %s -check-prefix=SSE2-Mingw32
3 ; RUN: llc < %s -mattr=+sse,-sse2 -mtriple=i686-apple-darwin -mcpu=core2 | FileCheck %s -check-prefix=SSE1
4 ; RUN: llc < %s -mattr=-sse -mtriple=i686-apple-darwin -mcpu=core2 | FileCheck %s -check-prefix=NOSSE
5 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core2 | FileCheck %s -check-prefix=X86-64
6 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=nehalem | FileCheck %s -check-prefix=NHM_64
8 ;;; TODO: The last run line chooses cpu=nehalem to reveal possible bugs in the "t4" test case.
10 ;;; Nehalem has a 'fast unaligned memory' attribute, so (1) some of the loads and stores
11 ;;; are certainly unaligned and (2) the first load and first store overlap with the second
12 ;;; load and second store respectively.
14 ;;; Is either of the sequences ideal?
15 ;;; Is the ideal code being generated for all CPU models?
18 @.str = internal constant [25 x i8] c"image\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00"
19 @.str2 = internal constant [30 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxxxx\00", align 4
; t1 copies the 25-byte @.str constant (only 1-byte alignment known for both
; source and destination) into a fresh stack slot. The checks below pin how
; each target/feature combination lowers this fixed-size, under-aligned
; memcpy, e.g. SSE2 uses a movaps/movsd pair plus a movb for the odd tail
; byte, while the no-SSE configuration falls back to scalar immediate stores
; of the string bytes (0x67616d69... pieces such as 1734438249 = "imag").
21 define void @t1(i32 %argc, i8** %argv) nounwind {
23 ; SSE2-Darwin-LABEL: t1:
24 ; SSE2-Darwin: movsd _.str+16, %xmm0
25 ; SSE2-Darwin: movsd %xmm0, 16(%esp)
26 ; SSE2-Darwin: movaps _.str, %xmm0
27 ; SSE2-Darwin: movaps %xmm0
28 ; SSE2-Darwin: movb $0, 24(%esp)
30 ; SSE2-Mingw32-LABEL: t1:
31 ; SSE2-Mingw32: movsd _.str+16, %xmm0
32 ; SSE2-Mingw32: movsd %xmm0, 16(%esp)
33 ; SSE2-Mingw32: movaps _.str, %xmm0
; NOTE(review): the only Darwin/Mingw32 difference checked here is the
; aligned vs. unaligned store of the low 16 bytes (movaps vs. movups),
; presumably reflecting different stack-alignment guarantees — confirm.
34 ; SSE2-Mingw32: movups %xmm0
35 ; SSE2-Mingw32: movb $0, 24(%esp)
38 ; SSE1: movaps _.str, %xmm0
40 ; SSE1: movb $0, 24(%esp)
41 ; SSE1: movl $0, 20(%esp)
42 ; SSE1: movl $0, 16(%esp)
51 ; NOSSE: movl $1734438249
54 ; X86-64: movaps _.str(%rip), %xmm0
55 ; X86-64: movaps %xmm0
; Local 25-byte buffer; the memcpy length (25) and alignment (1) are the
; inputs being exercised — 25 is deliberately not a multiple of 4/8/16.
58 %tmp1 = alloca [25 x i8]
59 %tmp2 = bitcast [25 x i8]* %tmp1 to i8*
60 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* getelementptr inbounds ([25 x i8], [25 x i8]* @.str, i32 0, i32 0), i32 25, i32 1, i1 false)
; 16-byte struct used by t2/t3 so the memcpy operands are pointer arguments
; rather than known globals.
65 %struct.s0 = type { [2 x double] }
; t2 is a 16-byte memcpy with 16-byte alignment declared on both operands:
; every configuration with any SSE support is expected to lower it to a
; single aligned 128-bit load/store pair (movaps), with no scalar fallback.
67 define void @t2(%struct.s0* nocapture %a, %struct.s0* nocapture %b) nounwind ssp {
69 ; SSE2-Darwin-LABEL: t2:
70 ; SSE2-Darwin: movaps (%ecx), %xmm0
71 ; SSE2-Darwin: movaps %xmm0, (%eax)
73 ; SSE2-Mingw32-LABEL: t2:
74 ; SSE2-Mingw32: movaps (%ecx), %xmm0
75 ; SSE2-Mingw32: movaps %xmm0, (%eax)
78 ; SSE1: movaps (%ecx), %xmm0
79 ; SSE1: movaps %xmm0, (%eax)
94 ; X86-64: movaps (%rsi), %xmm0
95 ; X86-64: movaps %xmm0, (%rdi)
96 %tmp2 = bitcast %struct.s0* %a to i8* ; <i8*> [#uses=1]
97 %tmp3 = bitcast %struct.s0* %b to i8* ; <i8*> [#uses=1]
; Same size as t3 (16 bytes) but with alignment 16 — contrast the lowering.
98 tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* %tmp3, i32 16, i32 16, i1 false)
; t3 is the same 16-byte copy as t2 but with only 8-byte alignment, so the
; backend may not use 16-byte aligned vector ops; the checks expect two
; 8-byte transfers instead (movsd pairs on 32-bit SSE2, movq register
; load/store pairs on x86-64).
102 define void @t3(%struct.s0* nocapture %a, %struct.s0* nocapture %b) nounwind ssp {
104 ; SSE2-Darwin-LABEL: t3:
105 ; SSE2-Darwin: movsd (%ecx), %xmm0
106 ; SSE2-Darwin: movsd 8(%ecx), %xmm1
107 ; SSE2-Darwin: movsd %xmm1, 8(%eax)
108 ; SSE2-Darwin: movsd %xmm0, (%eax)
110 ; SSE2-Mingw32-LABEL: t3:
111 ; SSE2-Mingw32: movsd (%ecx), %xmm0
112 ; SSE2-Mingw32: movsd 8(%ecx), %xmm1
113 ; SSE2-Mingw32: movsd %xmm1, 8(%eax)
114 ; SSE2-Mingw32: movsd %xmm0, (%eax)
; On x86-64 the 8-byte-aligned copy goes through GPRs, not XMM registers.
141 ; X86-64: movq (%rsi), %rax
142 ; X86-64: movq 8(%rsi), %rcx
143 ; X86-64: movq %rcx, 8(%rdi)
144 ; X86-64: movq %rax, (%rdi)
145 %tmp2 = bitcast %struct.s0* %a to i8* ; <i8*> [#uses=1]
146 %tmp3 = bitcast %struct.s0* %b to i8* ; <i8*> [#uses=1]
; Alignment 8 here vs. alignment 16 in t2 is the only IR-level difference.
147 tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* %tmp3, i32 16, i32 8, i1 false)
; t4 copies the 30-byte @.str2 constant ("xxx...x", source aligned 4,
; destination alignment only 1) into a stack slot. 0x78787878 = 2021161080
; is four 'x' bytes, so the scalar lowerings are checked as runs of
; movl $2021161080 stores plus a movw for the 2-byte tail. The cpu=nehalem
; run (fast unaligned memory) is expected to instead use two overlapping
; 16-byte vector copies — see the file-header TODO about whether that
; sequence is ideal.
151 define void @t4() nounwind {
153 ; SSE2-Darwin-LABEL: t4:
154 ; SSE2-Darwin: movw $120
155 ; SSE2-Darwin: movl $2021161080
156 ; SSE2-Darwin: movl $2021161080
157 ; SSE2-Darwin: movl $2021161080
158 ; SSE2-Darwin: movl $2021161080
159 ; SSE2-Darwin: movl $2021161080
160 ; SSE2-Darwin: movl $2021161080
161 ; SSE2-Darwin: movl $2021161080
163 ; SSE2-Mingw32-LABEL: t4:
164 ; SSE2-Mingw32: movw $120
165 ; SSE2-Mingw32: movl $2021161080
166 ; SSE2-Mingw32: movl $2021161080
167 ; SSE2-Mingw32: movl $2021161080
168 ; SSE2-Mingw32: movl $2021161080
169 ; SSE2-Mingw32: movl $2021161080
170 ; SSE2-Mingw32: movl $2021161080
171 ; SSE2-Mingw32: movl $2021161080
175 ; SSE1: movl $2021161080
176 ; SSE1: movl $2021161080
177 ; SSE1: movl $2021161080
178 ; SSE1: movl $2021161080
179 ; SSE1: movl $2021161080
180 ; SSE1: movl $2021161080
181 ; SSE1: movl $2021161080
185 ; NOSSE: movl $2021161080
186 ; NOSSE: movl $2021161080
187 ; NOSSE: movl $2021161080
188 ; NOSSE: movl $2021161080
189 ; NOSSE: movl $2021161080
190 ; NOSSE: movl $2021161080
191 ; NOSSE: movl $2021161080
; 0x7878787878787878 as a 64-bit immediate — eight 'x' bytes per store.
194 ; X86-64: movabsq $8680820740569200760, %rax
199 ; X86-64: movl $2021161080
; Nehalem path — note the two loads/stores overlap (offsets 0 and 14 cover
; bytes 0-29), and the first store is unaligned (movups) while the second
; is aligned (movaps); this is the sequence the header TODO questions.
202 ; NHM_64: movups _.str2+14(%rip), %xmm0
203 ; NHM_64: movups %xmm0, -26(%rsp)
204 ; NHM_64: movups _.str2(%rip), %xmm0
205 ; NHM_64: movaps %xmm0, -40(%rsp)
; Local 30-byte buffer; length 30 with destination alignment 1 forces the
; interesting unaligned/overlapping lowering decisions above.
207 %tmp1 = alloca [30 x i8]
208 %tmp2 = bitcast [30 x i8]* %tmp1 to i8*
209 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* getelementptr inbounds ([30 x i8], [30 x i8]* @.str2, i32 0, i32 0), i32 30, i32 1, i1 false)
; Old-style (pre-LLVM 7) memcpy intrinsic where alignment is passed as the
; explicit fourth i32 operand rather than as align attributes on the
; pointer arguments; all four tests above route their copies through this.
213 declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind