; RUN: llc < %s -march=arm64 -arm64-neon-syntax=apple -verify-machineinstrs | FileCheck %s
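; Check that the arm64 NEON structured-load intrinsics (ld2/ld3/ld4, their lane
; and dup variants, and ld1x2/ld1x3/ld1x4) select the expected load instructions.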

%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
%struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
%struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }

define %struct.__neon_int8x8x2_t @ld2_8b(i8* %A) nounwind {
; Make sure we are loading into the results defined by the ABI (i.e., v0, v1)
; and from the argument of the function, also defined by the ABI (i.e., x0).
; CHECK: ld2.8b { v0, v1 }, [x0]
  %tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm64.neon.ld2.v8i8.p0i8(i8* %A)
  ret %struct.__neon_int8x8x2_t %tmp2
}

define %struct.__neon_int8x8x3_t @ld3_8b(i8* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3.8b { v0, v1, v2 }, [x0]
  %tmp2 = call %struct.__neon_int8x8x3_t @llvm.arm64.neon.ld3.v8i8.p0i8(i8* %A)
  ret %struct.__neon_int8x8x3_t %tmp2
}

define %struct.__neon_int8x8x4_t @ld4_8b(i8* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4.8b { v0, v1, v2, v3 }, [x0]
  %tmp2 = call %struct.__neon_int8x8x4_t @llvm.arm64.neon.ld4.v8i8.p0i8(i8* %A)
  ret %struct.__neon_int8x8x4_t %tmp2
}

declare %struct.__neon_int8x8x2_t @llvm.arm64.neon.ld2.v8i8.p0i8(i8*) nounwind readonly
declare %struct.__neon_int8x8x3_t @llvm.arm64.neon.ld3.v8i8.p0i8(i8*) nounwind readonly
declare %struct.__neon_int8x8x4_t @llvm.arm64.neon.ld4.v8i8.p0i8(i8*) nounwind readonly

%struct.__neon_int8x16x2_t = type { <16 x i8>, <16 x i8> }
%struct.__neon_int8x16x3_t = type { <16 x i8>, <16 x i8>, <16 x i8> }
%struct.__neon_int8x16x4_t = type { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> }

define %struct.__neon_int8x16x2_t @ld2_16b(i8* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2.16b { v0, v1 }, [x0]
  %tmp2 = call %struct.__neon_int8x16x2_t @llvm.arm64.neon.ld2.v16i8.p0i8(i8* %A)
  ret %struct.__neon_int8x16x2_t %tmp2
}

define %struct.__neon_int8x16x3_t @ld3_16b(i8* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3.16b { v0, v1, v2 }, [x0]
  %tmp2 = call %struct.__neon_int8x16x3_t @llvm.arm64.neon.ld3.v16i8.p0i8(i8* %A)
  ret %struct.__neon_int8x16x3_t %tmp2
}

define %struct.__neon_int8x16x4_t @ld4_16b(i8* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4.16b { v0, v1, v2, v3 }, [x0]
  %tmp2 = call %struct.__neon_int8x16x4_t @llvm.arm64.neon.ld4.v16i8.p0i8(i8* %A)
  ret %struct.__neon_int8x16x4_t %tmp2
}

declare %struct.__neon_int8x16x2_t @llvm.arm64.neon.ld2.v16i8.p0i8(i8*) nounwind readonly
declare %struct.__neon_int8x16x3_t @llvm.arm64.neon.ld3.v16i8.p0i8(i8*) nounwind readonly
declare %struct.__neon_int8x16x4_t @llvm.arm64.neon.ld4.v16i8.p0i8(i8*) nounwind readonly

%struct.__neon_int16x4x2_t = type { <4 x i16>, <4 x i16> }
%struct.__neon_int16x4x3_t = type { <4 x i16>, <4 x i16>, <4 x i16> }
%struct.__neon_int16x4x4_t = type { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }

define %struct.__neon_int16x4x2_t @ld2_4h(i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2.4h { v0, v1 }, [x0]
  %tmp2 = call %struct.__neon_int16x4x2_t @llvm.arm64.neon.ld2.v4i16.p0i16(i16* %A)
  ret %struct.__neon_int16x4x2_t %tmp2
}

define %struct.__neon_int16x4x3_t @ld3_4h(i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3.4h { v0, v1, v2 }, [x0]
  %tmp2 = call %struct.__neon_int16x4x3_t @llvm.arm64.neon.ld3.v4i16.p0i16(i16* %A)
  ret %struct.__neon_int16x4x3_t %tmp2
}

define %struct.__neon_int16x4x4_t @ld4_4h(i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4.4h { v0, v1, v2, v3 }, [x0]
  %tmp2 = call %struct.__neon_int16x4x4_t @llvm.arm64.neon.ld4.v4i16.p0i16(i16* %A)
  ret %struct.__neon_int16x4x4_t %tmp2
}

declare %struct.__neon_int16x4x2_t @llvm.arm64.neon.ld2.v4i16.p0i16(i16*) nounwind readonly
declare %struct.__neon_int16x4x3_t @llvm.arm64.neon.ld3.v4i16.p0i16(i16*) nounwind readonly
declare %struct.__neon_int16x4x4_t @llvm.arm64.neon.ld4.v4i16.p0i16(i16*) nounwind readonly

%struct.__neon_int16x8x2_t = type { <8 x i16>, <8 x i16> }
%struct.__neon_int16x8x3_t = type { <8 x i16>, <8 x i16>, <8 x i16> }
%struct.__neon_int16x8x4_t = type { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> }

define %struct.__neon_int16x8x2_t @ld2_8h(i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2.8h { v0, v1 }, [x0]
  %tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm64.neon.ld2.v8i16.p0i16(i16* %A)
  ret %struct.__neon_int16x8x2_t %tmp2
}

define %struct.__neon_int16x8x3_t @ld3_8h(i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3.8h { v0, v1, v2 }, [x0]
  %tmp2 = call %struct.__neon_int16x8x3_t @llvm.arm64.neon.ld3.v8i16.p0i16(i16* %A)
  ret %struct.__neon_int16x8x3_t %tmp2
}

define %struct.__neon_int16x8x4_t @ld4_8h(i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4.8h { v0, v1, v2, v3 }, [x0]
  %tmp2 = call %struct.__neon_int16x8x4_t @llvm.arm64.neon.ld4.v8i16.p0i16(i16* %A)
  ret %struct.__neon_int16x8x4_t %tmp2
}

declare %struct.__neon_int16x8x2_t @llvm.arm64.neon.ld2.v8i16.p0i16(i16*) nounwind readonly
declare %struct.__neon_int16x8x3_t @llvm.arm64.neon.ld3.v8i16.p0i16(i16*) nounwind readonly
declare %struct.__neon_int16x8x4_t @llvm.arm64.neon.ld4.v8i16.p0i16(i16*) nounwind readonly

%struct.__neon_int32x2x2_t = type { <2 x i32>, <2 x i32> }
%struct.__neon_int32x2x3_t = type { <2 x i32>, <2 x i32>, <2 x i32> }
%struct.__neon_int32x2x4_t = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> }

define %struct.__neon_int32x2x2_t @ld2_2s(i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2.2s { v0, v1 }, [x0]
  %tmp2 = call %struct.__neon_int32x2x2_t @llvm.arm64.neon.ld2.v2i32.p0i32(i32* %A)
  ret %struct.__neon_int32x2x2_t %tmp2
}

define %struct.__neon_int32x2x3_t @ld3_2s(i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3.2s { v0, v1, v2 }, [x0]
  %tmp2 = call %struct.__neon_int32x2x3_t @llvm.arm64.neon.ld3.v2i32.p0i32(i32* %A)
  ret %struct.__neon_int32x2x3_t %tmp2
}

define %struct.__neon_int32x2x4_t @ld4_2s(i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4.2s { v0, v1, v2, v3 }, [x0]
  %tmp2 = call %struct.__neon_int32x2x4_t @llvm.arm64.neon.ld4.v2i32.p0i32(i32* %A)
  ret %struct.__neon_int32x2x4_t %tmp2
}

declare %struct.__neon_int32x2x2_t @llvm.arm64.neon.ld2.v2i32.p0i32(i32*) nounwind readonly
declare %struct.__neon_int32x2x3_t @llvm.arm64.neon.ld3.v2i32.p0i32(i32*) nounwind readonly
declare %struct.__neon_int32x2x4_t @llvm.arm64.neon.ld4.v2i32.p0i32(i32*) nounwind readonly

%struct.__neon_int32x4x2_t = type { <4 x i32>, <4 x i32> }
%struct.__neon_int32x4x3_t = type { <4 x i32>, <4 x i32>, <4 x i32> }
%struct.__neon_int32x4x4_t = type { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> }

define %struct.__neon_int32x4x2_t @ld2_4s(i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2.4s { v0, v1 }, [x0]
  %tmp2 = call %struct.__neon_int32x4x2_t @llvm.arm64.neon.ld2.v4i32.p0i32(i32* %A)
  ret %struct.__neon_int32x4x2_t %tmp2
}

define %struct.__neon_int32x4x3_t @ld3_4s(i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3.4s { v0, v1, v2 }, [x0]
  %tmp2 = call %struct.__neon_int32x4x3_t @llvm.arm64.neon.ld3.v4i32.p0i32(i32* %A)
  ret %struct.__neon_int32x4x3_t %tmp2
}

define %struct.__neon_int32x4x4_t @ld4_4s(i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4.4s { v0, v1, v2, v3 }, [x0]
  %tmp2 = call %struct.__neon_int32x4x4_t @llvm.arm64.neon.ld4.v4i32.p0i32(i32* %A)
  ret %struct.__neon_int32x4x4_t %tmp2
}

declare %struct.__neon_int32x4x2_t @llvm.arm64.neon.ld2.v4i32.p0i32(i32*) nounwind readonly
declare %struct.__neon_int32x4x3_t @llvm.arm64.neon.ld3.v4i32.p0i32(i32*) nounwind readonly
declare %struct.__neon_int32x4x4_t @llvm.arm64.neon.ld4.v4i32.p0i32(i32*) nounwind readonly

%struct.__neon_int64x2x2_t = type { <2 x i64>, <2 x i64> }
%struct.__neon_int64x2x3_t = type { <2 x i64>, <2 x i64>, <2 x i64> }
%struct.__neon_int64x2x4_t = type { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> }

define %struct.__neon_int64x2x2_t @ld2_2d(i64* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2.2d { v0, v1 }, [x0]
  %tmp2 = call %struct.__neon_int64x2x2_t @llvm.arm64.neon.ld2.v2i64.p0i64(i64* %A)
  ret %struct.__neon_int64x2x2_t %tmp2
}

define %struct.__neon_int64x2x3_t @ld3_2d(i64* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3.2d { v0, v1, v2 }, [x0]
  %tmp2 = call %struct.__neon_int64x2x3_t @llvm.arm64.neon.ld3.v2i64.p0i64(i64* %A)
  ret %struct.__neon_int64x2x3_t %tmp2
}

define %struct.__neon_int64x2x4_t @ld4_2d(i64* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4.2d { v0, v1, v2, v3 }, [x0]
  %tmp2 = call %struct.__neon_int64x2x4_t @llvm.arm64.neon.ld4.v2i64.p0i64(i64* %A)
  ret %struct.__neon_int64x2x4_t %tmp2
}

declare %struct.__neon_int64x2x2_t @llvm.arm64.neon.ld2.v2i64.p0i64(i64*) nounwind readonly
declare %struct.__neon_int64x2x3_t @llvm.arm64.neon.ld3.v2i64.p0i64(i64*) nounwind readonly
declare %struct.__neon_int64x2x4_t @llvm.arm64.neon.ld4.v2i64.p0i64(i64*) nounwind readonly

%struct.__neon_int64x1x2_t = type { <1 x i64>, <1 x i64> }
%struct.__neon_int64x1x3_t = type { <1 x i64>, <1 x i64>, <1 x i64> }
%struct.__neon_int64x1x4_t = type { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> }

define %struct.__neon_int64x1x2_t @ld2_1di64(i64* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.1d { v0, v1 }, [x0]
  %tmp2 = call %struct.__neon_int64x1x2_t @llvm.arm64.neon.ld2.v1i64.p0i64(i64* %A)
  ret %struct.__neon_int64x1x2_t %tmp2
}

define %struct.__neon_int64x1x3_t @ld3_1di64(i64* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.1d { v0, v1, v2 }, [x0]
  %tmp2 = call %struct.__neon_int64x1x3_t @llvm.arm64.neon.ld3.v1i64.p0i64(i64* %A)
  ret %struct.__neon_int64x1x3_t %tmp2
}

define %struct.__neon_int64x1x4_t @ld4_1di64(i64* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.1d { v0, v1, v2, v3 }, [x0]
  %tmp2 = call %struct.__neon_int64x1x4_t @llvm.arm64.neon.ld4.v1i64.p0i64(i64* %A)
  ret %struct.__neon_int64x1x4_t %tmp2
}

declare %struct.__neon_int64x1x2_t @llvm.arm64.neon.ld2.v1i64.p0i64(i64*) nounwind readonly
declare %struct.__neon_int64x1x3_t @llvm.arm64.neon.ld3.v1i64.p0i64(i64*) nounwind readonly
declare %struct.__neon_int64x1x4_t @llvm.arm64.neon.ld4.v1i64.p0i64(i64*) nounwind readonly

%struct.__neon_float64x1x2_t = type { <1 x double>, <1 x double> }
%struct.__neon_float64x1x3_t = type { <1 x double>, <1 x double>, <1 x double> }
%struct.__neon_float64x1x4_t = type { <1 x double>, <1 x double>, <1 x double>, <1 x double> }

define %struct.__neon_float64x1x2_t @ld2_1df64(double* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.1d { v0, v1 }, [x0]
  %tmp2 = call %struct.__neon_float64x1x2_t @llvm.arm64.neon.ld2.v1f64.p0f64(double* %A)
  ret %struct.__neon_float64x1x2_t %tmp2
}

define %struct.__neon_float64x1x3_t @ld3_1df64(double* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.1d { v0, v1, v2 }, [x0]
  %tmp2 = call %struct.__neon_float64x1x3_t @llvm.arm64.neon.ld3.v1f64.p0f64(double* %A)
  ret %struct.__neon_float64x1x3_t %tmp2
}

define %struct.__neon_float64x1x4_t @ld4_1df64(double* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.1d { v0, v1, v2, v3 }, [x0]
  %tmp2 = call %struct.__neon_float64x1x4_t @llvm.arm64.neon.ld4.v1f64.p0f64(double* %A)
  ret %struct.__neon_float64x1x4_t %tmp2
}

declare %struct.__neon_float64x1x2_t @llvm.arm64.neon.ld2.v1f64.p0f64(double*) nounwind readonly
declare %struct.__neon_float64x1x3_t @llvm.arm64.neon.ld3.v1f64.p0f64(double*) nounwind readonly
declare %struct.__neon_float64x1x4_t @llvm.arm64.neon.ld4.v1f64.p0f64(double*) nounwind readonly
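
; Lane-indexed structured loads: ld2/ld3/ld4 into lane 1 of an existing register tuple.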

define %struct.__neon_int8x16x2_t @ld2lane_16b(<16 x i8> %L1, <16 x i8> %L2, i8* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2.b { v0, v1 }[1], [x0]
  %tmp2 = call %struct.__neon_int8x16x2_t @llvm.arm64.neon.ld2lane.v16i8.p0i8(<16 x i8> %L1, <16 x i8> %L2, i64 1, i8* %A)
  ret %struct.__neon_int8x16x2_t %tmp2
}

define %struct.__neon_int8x16x3_t @ld3lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, i8* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3.b { v0, v1, v2 }[1], [x0]
  %tmp2 = call %struct.__neon_int8x16x3_t @llvm.arm64.neon.ld3lane.v16i8.p0i8(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, i64 1, i8* %A)
  ret %struct.__neon_int8x16x3_t %tmp2
}

define %struct.__neon_int8x16x4_t @ld4lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, <16 x i8> %L4, i8* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4.b { v0, v1, v2, v3 }[1], [x0]
  %tmp2 = call %struct.__neon_int8x16x4_t @llvm.arm64.neon.ld4lane.v16i8.p0i8(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, <16 x i8> %L4, i64 1, i8* %A)
  ret %struct.__neon_int8x16x4_t %tmp2
}

declare %struct.__neon_int8x16x2_t @llvm.arm64.neon.ld2lane.v16i8.p0i8(<16 x i8>, <16 x i8>, i64, i8*) nounwind readonly
declare %struct.__neon_int8x16x3_t @llvm.arm64.neon.ld3lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i64, i8*) nounwind readonly
declare %struct.__neon_int8x16x4_t @llvm.arm64.neon.ld4lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i64, i8*) nounwind readonly

define %struct.__neon_int16x8x2_t @ld2lane_8h(<8 x i16> %L1, <8 x i16> %L2, i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2.h { v0, v1 }[1], [x0]
  %tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm64.neon.ld2lane.v8i16.p0i16(<8 x i16> %L1, <8 x i16> %L2, i64 1, i16* %A)
  ret %struct.__neon_int16x8x2_t %tmp2
}

define %struct.__neon_int16x8x3_t @ld3lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3.h { v0, v1, v2 }[1], [x0]
  %tmp2 = call %struct.__neon_int16x8x3_t @llvm.arm64.neon.ld3lane.v8i16.p0i16(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, i64 1, i16* %A)
  ret %struct.__neon_int16x8x3_t %tmp2
}

define %struct.__neon_int16x8x4_t @ld4lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, <8 x i16> %L4, i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4.h { v0, v1, v2, v3 }[1], [x0]
  %tmp2 = call %struct.__neon_int16x8x4_t @llvm.arm64.neon.ld4lane.v8i16.p0i16(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, <8 x i16> %L4, i64 1, i16* %A)
  ret %struct.__neon_int16x8x4_t %tmp2
}

declare %struct.__neon_int16x8x2_t @llvm.arm64.neon.ld2lane.v8i16.p0i16(<8 x i16>, <8 x i16>, i64, i16*) nounwind readonly
declare %struct.__neon_int16x8x3_t @llvm.arm64.neon.ld3lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, i64, i16*) nounwind readonly
declare %struct.__neon_int16x8x4_t @llvm.arm64.neon.ld4lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i64, i16*) nounwind readonly

define %struct.__neon_int32x4x2_t @ld2lane_4s(<4 x i32> %L1, <4 x i32> %L2, i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2.s { v0, v1 }[1], [x0]
  %tmp2 = call %struct.__neon_int32x4x2_t @llvm.arm64.neon.ld2lane.v4i32.p0i32(<4 x i32> %L1, <4 x i32> %L2, i64 1, i32* %A)
  ret %struct.__neon_int32x4x2_t %tmp2
}

define %struct.__neon_int32x4x3_t @ld3lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3.s { v0, v1, v2 }[1], [x0]
  %tmp2 = call %struct.__neon_int32x4x3_t @llvm.arm64.neon.ld3lane.v4i32.p0i32(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, i64 1, i32* %A)
  ret %struct.__neon_int32x4x3_t %tmp2
}

define %struct.__neon_int32x4x4_t @ld4lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, <4 x i32> %L4, i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4.s { v0, v1, v2, v3 }[1], [x0]
  %tmp2 = call %struct.__neon_int32x4x4_t @llvm.arm64.neon.ld4lane.v4i32.p0i32(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, <4 x i32> %L4, i64 1, i32* %A)
  ret %struct.__neon_int32x4x4_t %tmp2
}

declare %struct.__neon_int32x4x2_t @llvm.arm64.neon.ld2lane.v4i32.p0i32(<4 x i32>, <4 x i32>, i64, i32*) nounwind readonly
declare %struct.__neon_int32x4x3_t @llvm.arm64.neon.ld3lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i64, i32*) nounwind readonly
declare %struct.__neon_int32x4x4_t @llvm.arm64.neon.ld4lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, i32*) nounwind readonly

define %struct.__neon_int64x2x2_t @ld2lane_2d(<2 x i64> %L1, <2 x i64> %L2, i64* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2.d { v0, v1 }[1], [x0]
  %tmp2 = call %struct.__neon_int64x2x2_t @llvm.arm64.neon.ld2lane.v2i64.p0i64(<2 x i64> %L1, <2 x i64> %L2, i64 1, i64* %A)
  ret %struct.__neon_int64x2x2_t %tmp2
}

define %struct.__neon_int64x2x3_t @ld3lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, i64* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3.d { v0, v1, v2 }[1], [x0]
  %tmp2 = call %struct.__neon_int64x2x3_t @llvm.arm64.neon.ld3lane.v2i64.p0i64(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, i64 1, i64* %A)
  ret %struct.__neon_int64x2x3_t %tmp2
}

define %struct.__neon_int64x2x4_t @ld4lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, <2 x i64> %L4, i64* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4.d { v0, v1, v2, v3 }[1], [x0]
  %tmp2 = call %struct.__neon_int64x2x4_t @llvm.arm64.neon.ld4lane.v2i64.p0i64(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, <2 x i64> %L4, i64 1, i64* %A)
  ret %struct.__neon_int64x2x4_t %tmp2
}

declare %struct.__neon_int64x2x2_t @llvm.arm64.neon.ld2lane.v2i64.p0i64(<2 x i64>, <2 x i64>, i64, i64*) nounwind readonly
declare %struct.__neon_int64x2x3_t @llvm.arm64.neon.ld3lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64, i64*) nounwind readonly
declare %struct.__neon_int64x2x4_t @llvm.arm64.neon.ld4lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64, i64*) nounwind readonly
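
; Duplicating loads: a scalar load splatted to every lane should select ld1r.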

define <8 x i8> @ld1r_8b(i8* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.8b { v0 }, [x0]
  %tmp1 = load i8* %bar
  %tmp2 = insertelement <8 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
  %tmp3 = insertelement <8 x i8> %tmp2, i8 %tmp1, i32 1
  %tmp4 = insertelement <8 x i8> %tmp3, i8 %tmp1, i32 2
  %tmp5 = insertelement <8 x i8> %tmp4, i8 %tmp1, i32 3
  %tmp6 = insertelement <8 x i8> %tmp5, i8 %tmp1, i32 4
  %tmp7 = insertelement <8 x i8> %tmp6, i8 %tmp1, i32 5
  %tmp8 = insertelement <8 x i8> %tmp7, i8 %tmp1, i32 6
  %tmp9 = insertelement <8 x i8> %tmp8, i8 %tmp1, i32 7
  ret <8 x i8> %tmp9
}

define <16 x i8> @ld1r_16b(i8* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.16b { v0 }, [x0]
  %tmp1 = load i8* %bar
  %tmp2 = insertelement <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
  %tmp3 = insertelement <16 x i8> %tmp2, i8 %tmp1, i32 1
  %tmp4 = insertelement <16 x i8> %tmp3, i8 %tmp1, i32 2
  %tmp5 = insertelement <16 x i8> %tmp4, i8 %tmp1, i32 3
  %tmp6 = insertelement <16 x i8> %tmp5, i8 %tmp1, i32 4
  %tmp7 = insertelement <16 x i8> %tmp6, i8 %tmp1, i32 5
  %tmp8 = insertelement <16 x i8> %tmp7, i8 %tmp1, i32 6
  %tmp9 = insertelement <16 x i8> %tmp8, i8 %tmp1, i32 7
  %tmp10 = insertelement <16 x i8> %tmp9, i8 %tmp1, i32 8
  %tmp11 = insertelement <16 x i8> %tmp10, i8 %tmp1, i32 9
  %tmp12 = insertelement <16 x i8> %tmp11, i8 %tmp1, i32 10
  %tmp13 = insertelement <16 x i8> %tmp12, i8 %tmp1, i32 11
  %tmp14 = insertelement <16 x i8> %tmp13, i8 %tmp1, i32 12
  %tmp15 = insertelement <16 x i8> %tmp14, i8 %tmp1, i32 13
  %tmp16 = insertelement <16 x i8> %tmp15, i8 %tmp1, i32 14
  %tmp17 = insertelement <16 x i8> %tmp16, i8 %tmp1, i32 15
  ret <16 x i8> %tmp17
}

define <4 x i16> @ld1r_4h(i16* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.4h { v0 }, [x0]
  %tmp1 = load i16* %bar
  %tmp2 = insertelement <4 x i16> <i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
  %tmp3 = insertelement <4 x i16> %tmp2, i16 %tmp1, i32 1
  %tmp4 = insertelement <4 x i16> %tmp3, i16 %tmp1, i32 2
  %tmp5 = insertelement <4 x i16> %tmp4, i16 %tmp1, i32 3
  ret <4 x i16> %tmp5
}

define <8 x i16> @ld1r_8h(i16* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.8h { v0 }, [x0]
  %tmp1 = load i16* %bar
  %tmp2 = insertelement <8 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
  %tmp3 = insertelement <8 x i16> %tmp2, i16 %tmp1, i32 1
  %tmp4 = insertelement <8 x i16> %tmp3, i16 %tmp1, i32 2
  %tmp5 = insertelement <8 x i16> %tmp4, i16 %tmp1, i32 3
  %tmp6 = insertelement <8 x i16> %tmp5, i16 %tmp1, i32 4
  %tmp7 = insertelement <8 x i16> %tmp6, i16 %tmp1, i32 5
  %tmp8 = insertelement <8 x i16> %tmp7, i16 %tmp1, i32 6
  %tmp9 = insertelement <8 x i16> %tmp8, i16 %tmp1, i32 7
  ret <8 x i16> %tmp9
}

define <2 x i32> @ld1r_2s(i32* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2s { v0 }, [x0]
  %tmp1 = load i32* %bar
  %tmp2 = insertelement <2 x i32> <i32 undef, i32 undef>, i32 %tmp1, i32 0
  %tmp3 = insertelement <2 x i32> %tmp2, i32 %tmp1, i32 1
  ret <2 x i32> %tmp3
}

define <4 x i32> @ld1r_4s(i32* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.4s { v0 }, [x0]
  %tmp1 = load i32* %bar
  %tmp2 = insertelement <4 x i32> <i32 undef, i32 undef, i32 undef, i32 undef>, i32 %tmp1, i32 0
  %tmp3 = insertelement <4 x i32> %tmp2, i32 %tmp1, i32 1
  %tmp4 = insertelement <4 x i32> %tmp3, i32 %tmp1, i32 2
  %tmp5 = insertelement <4 x i32> %tmp4, i32 %tmp1, i32 3
  ret <4 x i32> %tmp5
}

define <2 x i64> @ld1r_2d(i64* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2d { v0 }, [x0]
  %tmp1 = load i64* %bar
  %tmp2 = insertelement <2 x i64> <i64 undef, i64 undef>, i64 %tmp1, i32 0
  %tmp3 = insertelement <2 x i64> %tmp2, i64 %tmp1, i32 1
  ret <2 x i64> %tmp3
}
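
; Structured duplicating loads (ld2r/ld3r/ld4r).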

define %struct.__neon_int8x8x2_t @ld2r_8b(i8* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2r.8b { v0, v1 }, [x0]
  %tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm64.neon.ld2r.v8i8.p0i8(i8* %A)
  ret %struct.__neon_int8x8x2_t %tmp2
}

define %struct.__neon_int8x8x3_t @ld3r_8b(i8* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3r.8b { v0, v1, v2 }, [x0]
  %tmp2 = call %struct.__neon_int8x8x3_t @llvm.arm64.neon.ld3r.v8i8.p0i8(i8* %A)
  ret %struct.__neon_int8x8x3_t %tmp2
}

define %struct.__neon_int8x8x4_t @ld4r_8b(i8* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4r.8b { v0, v1, v2, v3 }, [x0]
  %tmp2 = call %struct.__neon_int8x8x4_t @llvm.arm64.neon.ld4r.v8i8.p0i8(i8* %A)
  ret %struct.__neon_int8x8x4_t %tmp2
}

declare %struct.__neon_int8x8x2_t @llvm.arm64.neon.ld2r.v8i8.p0i8(i8*) nounwind readonly
declare %struct.__neon_int8x8x3_t @llvm.arm64.neon.ld3r.v8i8.p0i8(i8*) nounwind readonly
declare %struct.__neon_int8x8x4_t @llvm.arm64.neon.ld4r.v8i8.p0i8(i8*) nounwind readonly

define %struct.__neon_int8x16x2_t @ld2r_16b(i8* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2r.16b { v0, v1 }, [x0]
  %tmp2 = call %struct.__neon_int8x16x2_t @llvm.arm64.neon.ld2r.v16i8.p0i8(i8* %A)
  ret %struct.__neon_int8x16x2_t %tmp2
}

define %struct.__neon_int8x16x3_t @ld3r_16b(i8* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3r.16b { v0, v1, v2 }, [x0]
  %tmp2 = call %struct.__neon_int8x16x3_t @llvm.arm64.neon.ld3r.v16i8.p0i8(i8* %A)
  ret %struct.__neon_int8x16x3_t %tmp2
}

define %struct.__neon_int8x16x4_t @ld4r_16b(i8* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4r.16b { v0, v1, v2, v3 }, [x0]
  %tmp2 = call %struct.__neon_int8x16x4_t @llvm.arm64.neon.ld4r.v16i8.p0i8(i8* %A)
  ret %struct.__neon_int8x16x4_t %tmp2
}

declare %struct.__neon_int8x16x2_t @llvm.arm64.neon.ld2r.v16i8.p0i8(i8*) nounwind readonly
declare %struct.__neon_int8x16x3_t @llvm.arm64.neon.ld3r.v16i8.p0i8(i8*) nounwind readonly
declare %struct.__neon_int8x16x4_t @llvm.arm64.neon.ld4r.v16i8.p0i8(i8*) nounwind readonly

define %struct.__neon_int16x4x2_t @ld2r_4h(i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2r.4h { v0, v1 }, [x0]
  %tmp2 = call %struct.__neon_int16x4x2_t @llvm.arm64.neon.ld2r.v4i16.p0i16(i16* %A)
  ret %struct.__neon_int16x4x2_t %tmp2
}

define %struct.__neon_int16x4x3_t @ld3r_4h(i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3r.4h { v0, v1, v2 }, [x0]
  %tmp2 = call %struct.__neon_int16x4x3_t @llvm.arm64.neon.ld3r.v4i16.p0i16(i16* %A)
  ret %struct.__neon_int16x4x3_t %tmp2
}

define %struct.__neon_int16x4x4_t @ld4r_4h(i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4r.4h { v0, v1, v2, v3 }, [x0]
  %tmp2 = call %struct.__neon_int16x4x4_t @llvm.arm64.neon.ld4r.v4i16.p0i16(i16* %A)
  ret %struct.__neon_int16x4x4_t %tmp2
}

declare %struct.__neon_int16x4x2_t @llvm.arm64.neon.ld2r.v4i16.p0i16(i16*) nounwind readonly
declare %struct.__neon_int16x4x3_t @llvm.arm64.neon.ld3r.v4i16.p0i16(i16*) nounwind readonly
declare %struct.__neon_int16x4x4_t @llvm.arm64.neon.ld4r.v4i16.p0i16(i16*) nounwind readonly

define %struct.__neon_int16x8x2_t @ld2r_8h(i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2r.8h { v0, v1 }, [x0]
  %tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm64.neon.ld2r.v8i16.p0i16(i16* %A)
  ret %struct.__neon_int16x8x2_t %tmp2
}

define %struct.__neon_int16x8x3_t @ld3r_8h(i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3r.8h { v0, v1, v2 }, [x0]
  %tmp2 = call %struct.__neon_int16x8x3_t @llvm.arm64.neon.ld3r.v8i16.p0i16(i16* %A)
  ret %struct.__neon_int16x8x3_t %tmp2
}

define %struct.__neon_int16x8x4_t @ld4r_8h(i16* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4r.8h { v0, v1, v2, v3 }, [x0]
  %tmp2 = call %struct.__neon_int16x8x4_t @llvm.arm64.neon.ld4r.v8i16.p0i16(i16* %A)
  ret %struct.__neon_int16x8x4_t %tmp2
}

declare %struct.__neon_int16x8x2_t @llvm.arm64.neon.ld2r.v8i16.p0i16(i16*) nounwind readonly
declare %struct.__neon_int16x8x3_t @llvm.arm64.neon.ld3r.v8i16.p0i16(i16*) nounwind readonly
declare %struct.__neon_int16x8x4_t @llvm.arm64.neon.ld4r.v8i16.p0i16(i16*) nounwind readonly

define %struct.__neon_int32x2x2_t @ld2r_2s(i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2r.2s { v0, v1 }, [x0]
  %tmp2 = call %struct.__neon_int32x2x2_t @llvm.arm64.neon.ld2r.v2i32.p0i32(i32* %A)
  ret %struct.__neon_int32x2x2_t %tmp2
}

define %struct.__neon_int32x2x3_t @ld3r_2s(i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3r.2s { v0, v1, v2 }, [x0]
  %tmp2 = call %struct.__neon_int32x2x3_t @llvm.arm64.neon.ld3r.v2i32.p0i32(i32* %A)
  ret %struct.__neon_int32x2x3_t %tmp2
}

define %struct.__neon_int32x2x4_t @ld4r_2s(i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4r.2s { v0, v1, v2, v3 }, [x0]
  %tmp2 = call %struct.__neon_int32x2x4_t @llvm.arm64.neon.ld4r.v2i32.p0i32(i32* %A)
  ret %struct.__neon_int32x2x4_t %tmp2
}

declare %struct.__neon_int32x2x2_t @llvm.arm64.neon.ld2r.v2i32.p0i32(i32*) nounwind readonly
declare %struct.__neon_int32x2x3_t @llvm.arm64.neon.ld3r.v2i32.p0i32(i32*) nounwind readonly
declare %struct.__neon_int32x2x4_t @llvm.arm64.neon.ld4r.v2i32.p0i32(i32*) nounwind readonly

define %struct.__neon_int32x4x2_t @ld2r_4s(i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2r.4s { v0, v1 }, [x0]
  %tmp2 = call %struct.__neon_int32x4x2_t @llvm.arm64.neon.ld2r.v4i32.p0i32(i32* %A)
  ret %struct.__neon_int32x4x2_t %tmp2
}

define %struct.__neon_int32x4x3_t @ld3r_4s(i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3r.4s { v0, v1, v2 }, [x0]
  %tmp2 = call %struct.__neon_int32x4x3_t @llvm.arm64.neon.ld3r.v4i32.p0i32(i32* %A)
  ret %struct.__neon_int32x4x3_t %tmp2
}

define %struct.__neon_int32x4x4_t @ld4r_4s(i32* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4r.4s { v0, v1, v2, v3 }, [x0]
  %tmp2 = call %struct.__neon_int32x4x4_t @llvm.arm64.neon.ld4r.v4i32.p0i32(i32* %A)
  ret %struct.__neon_int32x4x4_t %tmp2
}

declare %struct.__neon_int32x4x2_t @llvm.arm64.neon.ld2r.v4i32.p0i32(i32*) nounwind readonly
declare %struct.__neon_int32x4x3_t @llvm.arm64.neon.ld3r.v4i32.p0i32(i32*) nounwind readonly
declare %struct.__neon_int32x4x4_t @llvm.arm64.neon.ld4r.v4i32.p0i32(i32*) nounwind readonly

define %struct.__neon_int64x2x2_t @ld2r_2d(i64* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld2r.2d { v0, v1 }, [x0]
  %tmp2 = call %struct.__neon_int64x2x2_t @llvm.arm64.neon.ld2r.v2i64.p0i64(i64* %A)
  ret %struct.__neon_int64x2x2_t %tmp2
}

define %struct.__neon_int64x2x3_t @ld3r_2d(i64* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld3r.2d { v0, v1, v2 }, [x0]
  %tmp2 = call %struct.__neon_int64x2x3_t @llvm.arm64.neon.ld3r.v2i64.p0i64(i64* %A)
  ret %struct.__neon_int64x2x3_t %tmp2
}

define %struct.__neon_int64x2x4_t @ld4r_2d(i64* %A) nounwind {
; Make sure we are using the operands defined by the ABI
; CHECK: ld4r.2d { v0, v1, v2, v3 }, [x0]
  %tmp2 = call %struct.__neon_int64x2x4_t @llvm.arm64.neon.ld4r.v2i64.p0i64(i64* %A)
  ret %struct.__neon_int64x2x4_t %tmp2
}

declare %struct.__neon_int64x2x2_t @llvm.arm64.neon.ld2r.v2i64.p0i64(i64*) nounwind readonly
declare %struct.__neon_int64x2x3_t @llvm.arm64.neon.ld3r.v2i64.p0i64(i64*) nounwind readonly
declare %struct.__neon_int64x2x4_t @llvm.arm64.neon.ld4r.v2i64.p0i64(i64*) nounwind readonly
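
; Load-to-lane: a scalar load inserted into lane 0 of an existing vector should select ld1 { }[0].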

define <16 x i8> @ld1_16b(<16 x i8> %V, i8* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.b { v0 }[0], [x0]
  %tmp1 = load i8* %bar
  %tmp2 = insertelement <16 x i8> %V, i8 %tmp1, i32 0
  ret <16 x i8> %tmp2
}

define <8 x i16> @ld1_8h(<8 x i16> %V, i16* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.h { v0 }[0], [x0]
  %tmp1 = load i16* %bar
  %tmp2 = insertelement <8 x i16> %V, i16 %tmp1, i32 0
  ret <8 x i16> %tmp2
}

define <4 x i32> @ld1_4s(<4 x i32> %V, i32* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.s { v0 }[0], [x0]
  %tmp1 = load i32* %bar
  %tmp2 = insertelement <4 x i32> %V, i32 %tmp1, i32 0
  ret <4 x i32> %tmp2
}

define <2 x i64> @ld1_2d(<2 x i64> %V, i64* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.d { v0 }[0], [x0]
  %tmp1 = load i64* %bar
  %tmp2 = insertelement <2 x i64> %V, i64 %tmp1, i32 0
  ret <2 x i64> %tmp2
}

define <1 x i64> @ld1_1d(<1 x i64>* %p) {
; Make sure we are using the operands defined by the ABI
; CHECK: ldr [[REG:d[0-9]+]], [x0]
  %tmp = load <1 x i64>* %p, align 8
  ret <1 x i64> %tmp
}

; Add rdar://13098923 test case: vld1_dup_u32 doesn't generate ld1r.2s.
define void @ld1r_2s_from_dup(i8* nocapture %a, i8* nocapture %b, i16* nocapture %diff) nounwind ssp {
; CHECK: ld1r_2s_from_dup
; CHECK: ld1r.2s { [[ARG1:v[0-9]+]] }, [x0]
; CHECK-NEXT: ld1r.2s { [[ARG2:v[0-9]+]] }, [x1]
; CHECK-NEXT: usubl.8h v[[RESREGNUM:[0-9]+]], [[ARG1]], [[ARG2]]
; CHECK-NEXT: str d[[RESREGNUM]], [x2]
  %tmp = bitcast i8* %a to i32*
  %tmp1 = load i32* %tmp, align 4
  %tmp2 = insertelement <2 x i32> undef, i32 %tmp1, i32 0
  %lane = shufflevector <2 x i32> %tmp2, <2 x i32> undef, <2 x i32> zeroinitializer
  %tmp3 = bitcast <2 x i32> %lane to <8 x i8>
  %tmp4 = bitcast i8* %b to i32*
  %tmp5 = load i32* %tmp4, align 4
  %tmp6 = insertelement <2 x i32> undef, i32 %tmp5, i32 0
  %lane1 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> zeroinitializer
  %tmp7 = bitcast <2 x i32> %lane1 to <8 x i8>
  %vmovl.i.i = zext <8 x i8> %tmp3 to <8 x i16>
  %vmovl.i4.i = zext <8 x i8> %tmp7 to <8 x i16>
  %sub.i = sub <8 x i16> %vmovl.i.i, %vmovl.i4.i
  %tmp8 = bitcast <8 x i16> %sub.i to <2 x i64>
  %shuffle.i = shufflevector <2 x i64> %tmp8, <2 x i64> undef, <1 x i32> zeroinitializer
  %tmp9 = bitcast <1 x i64> %shuffle.i to <4 x i16>
  %tmp10 = bitcast i16* %diff to <4 x i16>*
  store <4 x i16> %tmp9, <4 x i16>* %tmp10, align 8
  ret void
}

; Tests for rdar://11947069: vld1_dup_* and vld1q_dup_* code gen is suboptimal.
define <4 x float> @ld1r_4s_float(float* nocapture %x) {
; CHECK: ld1r_4s_float
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.4s { v0 }, [x0]
  %tmp = load float* %x, align 4
  %tmp1 = insertelement <4 x float> undef, float %tmp, i32 0
  %tmp2 = insertelement <4 x float> %tmp1, float %tmp, i32 1
  %tmp3 = insertelement <4 x float> %tmp2, float %tmp, i32 2
  %tmp4 = insertelement <4 x float> %tmp3, float %tmp, i32 3
  ret <4 x float> %tmp4
}

define <2 x float> @ld1r_2s_float(float* nocapture %x) {
; CHECK: ld1r_2s_float
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2s { v0 }, [x0]
  %tmp = load float* %x, align 4
  %tmp1 = insertelement <2 x float> undef, float %tmp, i32 0
  %tmp2 = insertelement <2 x float> %tmp1, float %tmp, i32 1
  ret <2 x float> %tmp2
}

define <2 x double> @ld1r_2d_double(double* nocapture %x) {
; CHECK: ld1r_2d_double
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2d { v0 }, [x0]
  %tmp = load double* %x, align 4
  %tmp1 = insertelement <2 x double> undef, double %tmp, i32 0
  %tmp2 = insertelement <2 x double> %tmp1, double %tmp, i32 1
  ret <2 x double> %tmp2
}

define <1 x double> @ld1r_1d_double(double* nocapture %x) {
; CHECK: ld1r_1d_double
; Make sure we are using the operands defined by the ABI
; CHECK: ldr d0, [x0]
  %tmp = load double* %x, align 4
  %tmp1 = insertelement <1 x double> undef, double %tmp, i32 0
  ret <1 x double> %tmp1
}
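
; Same ld1r patterns expressed with a shufflevector splat instead of repeated inserts.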

define <4 x float> @ld1r_4s_float_shuff(float* nocapture %x) {
; CHECK: ld1r_4s_float_shuff
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.4s { v0 }, [x0]
  %tmp = load float* %x, align 4
  %tmp1 = insertelement <4 x float> undef, float %tmp, i32 0
  %lane = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> zeroinitializer
  ret <4 x float> %lane
}

define <2 x float> @ld1r_2s_float_shuff(float* nocapture %x) {
; CHECK: ld1r_2s_float_shuff
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2s { v0 }, [x0]
  %tmp = load float* %x, align 4
  %tmp1 = insertelement <2 x float> undef, float %tmp, i32 0
  %lane = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer
  ret <2 x float> %lane
}

define <2 x double> @ld1r_2d_double_shuff(double* nocapture %x) {
; CHECK: ld1r_2d_double_shuff
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2d { v0 }, [x0]
  %tmp = load double* %x, align 4
  %tmp1 = insertelement <2 x double> undef, double %tmp, i32 0
  %lane = shufflevector <2 x double> %tmp1, <2 x double> undef, <2 x i32> zeroinitializer
  ret <2 x double> %lane
}

define <1 x double> @ld1r_1d_double_shuff(double* nocapture %x) {
; CHECK: ld1r_1d_double_shuff
; Make sure we are using the operands defined by the ABI
; CHECK: ldr d0, [x0]
  %tmp = load double* %x, align 4
  %tmp1 = insertelement <1 x double> undef, double %tmp, i32 0
  %lane = shufflevector <1 x double> %tmp1, <1 x double> undef, <1 x i32> zeroinitializer
  ret <1 x double> %lane
}
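
; Multi-register ld1 (the ld1x2/ld1x3/ld1x4 intrinsics).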

%struct.__neon_float32x2x2_t = type { <2 x float>, <2 x float> }
%struct.__neon_float32x2x3_t = type { <2 x float>, <2 x float>, <2 x float> }
%struct.__neon_float32x2x4_t = type { <2 x float>, <2 x float>, <2 x float>, <2 x float> }

declare %struct.__neon_int8x8x2_t @llvm.arm64.neon.ld1x2.v8i8.p0i8(i8*) nounwind readonly
declare %struct.__neon_int16x4x2_t @llvm.arm64.neon.ld1x2.v4i16.p0i16(i16*) nounwind readonly
declare %struct.__neon_int32x2x2_t @llvm.arm64.neon.ld1x2.v2i32.p0i32(i32*) nounwind readonly
declare %struct.__neon_float32x2x2_t @llvm.arm64.neon.ld1x2.v2f32.p0f32(float*) nounwind readonly
declare %struct.__neon_int64x1x2_t @llvm.arm64.neon.ld1x2.v1i64.p0i64(i64*) nounwind readonly
declare %struct.__neon_float64x1x2_t @llvm.arm64.neon.ld1x2.v1f64.p0f64(double*) nounwind readonly

define %struct.__neon_int8x8x2_t @ld1_x2_v8i8(i8* %addr) {
; CHECK-LABEL: ld1_x2_v8i8:
; CHECK: ld1.8b { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int8x8x2_t @llvm.arm64.neon.ld1x2.v8i8.p0i8(i8* %addr)
  ret %struct.__neon_int8x8x2_t %val
}

define %struct.__neon_int16x4x2_t @ld1_x2_v4i16(i16* %addr) {
; CHECK-LABEL: ld1_x2_v4i16:
; CHECK: ld1.4h { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int16x4x2_t @llvm.arm64.neon.ld1x2.v4i16.p0i16(i16* %addr)
  ret %struct.__neon_int16x4x2_t %val
}

define %struct.__neon_int32x2x2_t @ld1_x2_v2i32(i32* %addr) {
; CHECK-LABEL: ld1_x2_v2i32:
; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int32x2x2_t @llvm.arm64.neon.ld1x2.v2i32.p0i32(i32* %addr)
  ret %struct.__neon_int32x2x2_t %val
}

define %struct.__neon_float32x2x2_t @ld1_x2_v2f32(float* %addr) {
; CHECK-LABEL: ld1_x2_v2f32:
; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_float32x2x2_t @llvm.arm64.neon.ld1x2.v2f32.p0f32(float* %addr)
  ret %struct.__neon_float32x2x2_t %val
}

define %struct.__neon_int64x1x2_t @ld1_x2_v1i64(i64* %addr) {
; CHECK-LABEL: ld1_x2_v1i64:
; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int64x1x2_t @llvm.arm64.neon.ld1x2.v1i64.p0i64(i64* %addr)
  ret %struct.__neon_int64x1x2_t %val
}

define %struct.__neon_float64x1x2_t @ld1_x2_v1f64(double* %addr) {
; CHECK-LABEL: ld1_x2_v1f64:
; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_float64x1x2_t @llvm.arm64.neon.ld1x2.v1f64.p0f64(double* %addr)
  ret %struct.__neon_float64x1x2_t %val
}

%struct.__neon_float32x4x2_t = type { <4 x float>, <4 x float> }
%struct.__neon_float32x4x3_t = type { <4 x float>, <4 x float>, <4 x float> }
%struct.__neon_float32x4x4_t = type { <4 x float>, <4 x float>, <4 x float>, <4 x float> }

%struct.__neon_float64x2x2_t = type { <2 x double>, <2 x double> }
%struct.__neon_float64x2x3_t = type { <2 x double>, <2 x double>, <2 x double> }
%struct.__neon_float64x2x4_t = type { <2 x double>, <2 x double>, <2 x double>, <2 x double> }

declare %struct.__neon_int8x16x2_t @llvm.arm64.neon.ld1x2.v16i8.p0i8(i8*) nounwind readonly
declare %struct.__neon_int16x8x2_t @llvm.arm64.neon.ld1x2.v8i16.p0i16(i16*) nounwind readonly
declare %struct.__neon_int32x4x2_t @llvm.arm64.neon.ld1x2.v4i32.p0i32(i32*) nounwind readonly
declare %struct.__neon_float32x4x2_t @llvm.arm64.neon.ld1x2.v4f32.p0f32(float*) nounwind readonly
declare %struct.__neon_int64x2x2_t @llvm.arm64.neon.ld1x2.v2i64.p0i64(i64*) nounwind readonly
declare %struct.__neon_float64x2x2_t @llvm.arm64.neon.ld1x2.v2f64.p0f64(double*) nounwind readonly

define %struct.__neon_int8x16x2_t @ld1_x2_v16i8(i8* %addr) {
; CHECK-LABEL: ld1_x2_v16i8:
; CHECK: ld1.16b { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int8x16x2_t @llvm.arm64.neon.ld1x2.v16i8.p0i8(i8* %addr)
  ret %struct.__neon_int8x16x2_t %val
}

define %struct.__neon_int16x8x2_t @ld1_x2_v8i16(i16* %addr) {
; CHECK-LABEL: ld1_x2_v8i16:
; CHECK: ld1.8h { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int16x8x2_t @llvm.arm64.neon.ld1x2.v8i16.p0i16(i16* %addr)
  ret %struct.__neon_int16x8x2_t %val
}

define %struct.__neon_int32x4x2_t @ld1_x2_v4i32(i32* %addr) {
; CHECK-LABEL: ld1_x2_v4i32:
; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int32x4x2_t @llvm.arm64.neon.ld1x2.v4i32.p0i32(i32* %addr)
  ret %struct.__neon_int32x4x2_t %val
}

define %struct.__neon_float32x4x2_t @ld1_x2_v4f32(float* %addr) {
; CHECK-LABEL: ld1_x2_v4f32:
; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_float32x4x2_t @llvm.arm64.neon.ld1x2.v4f32.p0f32(float* %addr)
  ret %struct.__neon_float32x4x2_t %val
}

define %struct.__neon_int64x2x2_t @ld1_x2_v2i64(i64* %addr) {
; CHECK-LABEL: ld1_x2_v2i64:
; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int64x2x2_t @llvm.arm64.neon.ld1x2.v2i64.p0i64(i64* %addr)
  ret %struct.__neon_int64x2x2_t %val
}

define %struct.__neon_float64x2x2_t @ld1_x2_v2f64(double* %addr) {
; CHECK-LABEL: ld1_x2_v2f64:
; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_float64x2x2_t @llvm.arm64.neon.ld1x2.v2f64.p0f64(double* %addr)
  ret %struct.__neon_float64x2x2_t %val
}

declare %struct.__neon_int8x8x3_t @llvm.arm64.neon.ld1x3.v8i8.p0i8(i8*) nounwind readonly
declare %struct.__neon_int16x4x3_t @llvm.arm64.neon.ld1x3.v4i16.p0i16(i16*) nounwind readonly
declare %struct.__neon_int32x2x3_t @llvm.arm64.neon.ld1x3.v2i32.p0i32(i32*) nounwind readonly
declare %struct.__neon_float32x2x3_t @llvm.arm64.neon.ld1x3.v2f32.p0f32(float*) nounwind readonly
declare %struct.__neon_int64x1x3_t @llvm.arm64.neon.ld1x3.v1i64.p0i64(i64*) nounwind readonly
declare %struct.__neon_float64x1x3_t @llvm.arm64.neon.ld1x3.v1f64.p0f64(double*) nounwind readonly

define %struct.__neon_int8x8x3_t @ld1_x3_v8i8(i8* %addr) {
; CHECK-LABEL: ld1_x3_v8i8:
; CHECK: ld1.8b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int8x8x3_t @llvm.arm64.neon.ld1x3.v8i8.p0i8(i8* %addr)
  ret %struct.__neon_int8x8x3_t %val
}

define %struct.__neon_int16x4x3_t @ld1_x3_v4i16(i16* %addr) {
; CHECK-LABEL: ld1_x3_v4i16:
; CHECK: ld1.4h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int16x4x3_t @llvm.arm64.neon.ld1x3.v4i16.p0i16(i16* %addr)
  ret %struct.__neon_int16x4x3_t %val
}

define %struct.__neon_int32x2x3_t @ld1_x3_v2i32(i32* %addr) {
; CHECK-LABEL: ld1_x3_v2i32:
; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int32x2x3_t @llvm.arm64.neon.ld1x3.v2i32.p0i32(i32* %addr)
  ret %struct.__neon_int32x2x3_t %val
}

define %struct.__neon_float32x2x3_t @ld1_x3_v2f32(float* %addr) {
; CHECK-LABEL: ld1_x3_v2f32:
; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_float32x2x3_t @llvm.arm64.neon.ld1x3.v2f32.p0f32(float* %addr)
  ret %struct.__neon_float32x2x3_t %val
}

define %struct.__neon_int64x1x3_t @ld1_x3_v1i64(i64* %addr) {
; CHECK-LABEL: ld1_x3_v1i64:
; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int64x1x3_t @llvm.arm64.neon.ld1x3.v1i64.p0i64(i64* %addr)
  ret %struct.__neon_int64x1x3_t %val
}

define %struct.__neon_float64x1x3_t @ld1_x3_v1f64(double* %addr) {
; CHECK-LABEL: ld1_x3_v1f64:
; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_float64x1x3_t @llvm.arm64.neon.ld1x3.v1f64.p0f64(double* %addr)
  ret %struct.__neon_float64x1x3_t %val
}

declare %struct.__neon_int8x16x3_t @llvm.arm64.neon.ld1x3.v16i8.p0i8(i8*) nounwind readonly
declare %struct.__neon_int16x8x3_t @llvm.arm64.neon.ld1x3.v8i16.p0i16(i16*) nounwind readonly
declare %struct.__neon_int32x4x3_t @llvm.arm64.neon.ld1x3.v4i32.p0i32(i32*) nounwind readonly
declare %struct.__neon_float32x4x3_t @llvm.arm64.neon.ld1x3.v4f32.p0f32(float*) nounwind readonly
declare %struct.__neon_int64x2x3_t @llvm.arm64.neon.ld1x3.v2i64.p0i64(i64*) nounwind readonly
declare %struct.__neon_float64x2x3_t @llvm.arm64.neon.ld1x3.v2f64.p0f64(double*) nounwind readonly

define %struct.__neon_int8x16x3_t @ld1_x3_v16i8(i8* %addr) {
; CHECK-LABEL: ld1_x3_v16i8:
; CHECK: ld1.16b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int8x16x3_t @llvm.arm64.neon.ld1x3.v16i8.p0i8(i8* %addr)
  ret %struct.__neon_int8x16x3_t %val
}

define %struct.__neon_int16x8x3_t @ld1_x3_v8i16(i16* %addr) {
; CHECK-LABEL: ld1_x3_v8i16:
; CHECK: ld1.8h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int16x8x3_t @llvm.arm64.neon.ld1x3.v8i16.p0i16(i16* %addr)
  ret %struct.__neon_int16x8x3_t %val
}

define %struct.__neon_int32x4x3_t @ld1_x3_v4i32(i32* %addr) {
; CHECK-LABEL: ld1_x3_v4i32:
; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int32x4x3_t @llvm.arm64.neon.ld1x3.v4i32.p0i32(i32* %addr)
  ret %struct.__neon_int32x4x3_t %val
}

define %struct.__neon_float32x4x3_t @ld1_x3_v4f32(float* %addr) {
; CHECK-LABEL: ld1_x3_v4f32:
; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_float32x4x3_t @llvm.arm64.neon.ld1x3.v4f32.p0f32(float* %addr)
  ret %struct.__neon_float32x4x3_t %val
}

define %struct.__neon_int64x2x3_t @ld1_x3_v2i64(i64* %addr) {
; CHECK-LABEL: ld1_x3_v2i64:
; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int64x2x3_t @llvm.arm64.neon.ld1x3.v2i64.p0i64(i64* %addr)
  ret %struct.__neon_int64x2x3_t %val
}

define %struct.__neon_float64x2x3_t @ld1_x3_v2f64(double* %addr) {
; CHECK-LABEL: ld1_x3_v2f64:
; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_float64x2x3_t @llvm.arm64.neon.ld1x3.v2f64.p0f64(double* %addr)
  ret %struct.__neon_float64x2x3_t %val
}

declare %struct.__neon_int8x8x4_t @llvm.arm64.neon.ld1x4.v8i8.p0i8(i8*) nounwind readonly
declare %struct.__neon_int16x4x4_t @llvm.arm64.neon.ld1x4.v4i16.p0i16(i16*) nounwind readonly
declare %struct.__neon_int32x2x4_t @llvm.arm64.neon.ld1x4.v2i32.p0i32(i32*) nounwind readonly
declare %struct.__neon_float32x2x4_t @llvm.arm64.neon.ld1x4.v2f32.p0f32(float*) nounwind readonly
declare %struct.__neon_int64x1x4_t @llvm.arm64.neon.ld1x4.v1i64.p0i64(i64*) nounwind readonly
declare %struct.__neon_float64x1x4_t @llvm.arm64.neon.ld1x4.v1f64.p0f64(double*) nounwind readonly

define %struct.__neon_int8x8x4_t @ld1_x4_v8i8(i8* %addr) {
; CHECK-LABEL: ld1_x4_v8i8:
; CHECK: ld1.8b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int8x8x4_t @llvm.arm64.neon.ld1x4.v8i8.p0i8(i8* %addr)
  ret %struct.__neon_int8x8x4_t %val
}

define %struct.__neon_int16x4x4_t @ld1_x4_v4i16(i16* %addr) {
; CHECK-LABEL: ld1_x4_v4i16:
; CHECK: ld1.4h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int16x4x4_t @llvm.arm64.neon.ld1x4.v4i16.p0i16(i16* %addr)
  ret %struct.__neon_int16x4x4_t %val
}

define %struct.__neon_int32x2x4_t @ld1_x4_v2i32(i32* %addr) {
; CHECK-LABEL: ld1_x4_v2i32:
; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int32x2x4_t @llvm.arm64.neon.ld1x4.v2i32.p0i32(i32* %addr)
  ret %struct.__neon_int32x2x4_t %val
}

define %struct.__neon_float32x2x4_t @ld1_x4_v2f32(float* %addr) {
; CHECK-LABEL: ld1_x4_v2f32:
; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_float32x2x4_t @llvm.arm64.neon.ld1x4.v2f32.p0f32(float* %addr)
  ret %struct.__neon_float32x2x4_t %val
}

define %struct.__neon_int64x1x4_t @ld1_x4_v1i64(i64* %addr) {
; CHECK-LABEL: ld1_x4_v1i64:
; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int64x1x4_t @llvm.arm64.neon.ld1x4.v1i64.p0i64(i64* %addr)
  ret %struct.__neon_int64x1x4_t %val
}

define %struct.__neon_float64x1x4_t @ld1_x4_v1f64(double* %addr) {
; CHECK-LABEL: ld1_x4_v1f64:
; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_float64x1x4_t @llvm.arm64.neon.ld1x4.v1f64.p0f64(double* %addr)
  ret %struct.__neon_float64x1x4_t %val
}

declare %struct.__neon_int8x16x4_t @llvm.arm64.neon.ld1x4.v16i8.p0i8(i8*) nounwind readonly
declare %struct.__neon_int16x8x4_t @llvm.arm64.neon.ld1x4.v8i16.p0i16(i16*) nounwind readonly
declare %struct.__neon_int32x4x4_t @llvm.arm64.neon.ld1x4.v4i32.p0i32(i32*) nounwind readonly
declare %struct.__neon_float32x4x4_t @llvm.arm64.neon.ld1x4.v4f32.p0f32(float*) nounwind readonly
declare %struct.__neon_int64x2x4_t @llvm.arm64.neon.ld1x4.v2i64.p0i64(i64*) nounwind readonly
declare %struct.__neon_float64x2x4_t @llvm.arm64.neon.ld1x4.v2f64.p0f64(double*) nounwind readonly

define %struct.__neon_int8x16x4_t @ld1_x4_v16i8(i8* %addr) {
; CHECK-LABEL: ld1_x4_v16i8:
; CHECK: ld1.16b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int8x16x4_t @llvm.arm64.neon.ld1x4.v16i8.p0i8(i8* %addr)
  ret %struct.__neon_int8x16x4_t %val
}

define %struct.__neon_int16x8x4_t @ld1_x4_v8i16(i16* %addr) {
; CHECK-LABEL: ld1_x4_v8i16:
; CHECK: ld1.8h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int16x8x4_t @llvm.arm64.neon.ld1x4.v8i16.p0i16(i16* %addr)
  ret %struct.__neon_int16x8x4_t %val
}

define %struct.__neon_int32x4x4_t @ld1_x4_v4i32(i32* %addr) {
; CHECK-LABEL: ld1_x4_v4i32:
; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int32x4x4_t @llvm.arm64.neon.ld1x4.v4i32.p0i32(i32* %addr)
  ret %struct.__neon_int32x4x4_t %val
}

define %struct.__neon_float32x4x4_t @ld1_x4_v4f32(float* %addr) {
; CHECK-LABEL: ld1_x4_v4f32:
; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_float32x4x4_t @llvm.arm64.neon.ld1x4.v4f32.p0f32(float* %addr)
  ret %struct.__neon_float32x4x4_t %val
}

define %struct.__neon_int64x2x4_t @ld1_x4_v2i64(i64* %addr) {
; CHECK-LABEL: ld1_x4_v2i64:
; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_int64x2x4_t @llvm.arm64.neon.ld1x4.v2i64.p0i64(i64* %addr)
  ret %struct.__neon_int64x2x4_t %val
}

define %struct.__neon_float64x2x4_t @ld1_x4_v2f64(double* %addr) {
; CHECK-LABEL: ld1_x4_v2f64:
; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
  %val = call %struct.__neon_float64x2x4_t @llvm.arm64.neon.ld1x4.v2f64.p0f64(double* %addr)
  ret %struct.__neon_float64x2x4_t %val
}