; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s

; Check for a post-increment updating load.
define <4 x i16> @test_vld1_fx_update(i16** %ptr) nounwind {
; CHECK: test_vld1_fx_update
; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}], #8
  %A = load i16** %ptr
  %tmp0 = bitcast i16* %A to i8*
  %tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0, i32 2)
  %tmp2 = getelementptr i16* %A, i32 4
  store i16* %tmp2, i16** %ptr
  ret <4 x i16> %tmp1
}

; Check for a post-increment updating load with register increment.
define <2 x i32> @test_vld1_reg_update(i32** %ptr, i32 %inc) nounwind {
; CHECK: test_vld1_reg_update
; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
  %A = load i32** %ptr
  %tmp0 = bitcast i32* %A to i8*
  %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0, i32 4)
  %tmp2 = getelementptr i32* %A, i32 %inc
  store i32* %tmp2, i32** %ptr
  ret <2 x i32> %tmp1
}

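; Check for a post-increment updating load of a two-element structure.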
define <2 x float> @test_vld2_fx_update(float** %ptr) nounwind {
; CHECK: test_vld2_fx_update
; CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], #16
  %A = load float** %ptr
  %tmp0 = bitcast float* %A to i8*
  %tmp1 = call { <2 x float>, <2 x float> } @llvm.arm.neon.vld2.v2f32(i8* %tmp0, i32 4)
  %tmp2 = extractvalue { <2 x float>, <2 x float> } %tmp1, 0
  %tmp3 = getelementptr float* %A, i32 4
  store float* %tmp3, float** %ptr
  ret <2 x float> %tmp2
}

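; Check for a post-increment updating load of a two-element structure with register increment.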
define <16 x i8> @test_vld2_reg_update(i8** %ptr, i32 %inc) nounwind {
; CHECK: test_vld2_reg_update
; CHECK: ld2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
  %A = load i8** %ptr
  %tmp0 = call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8* %A, i32 1)
  %tmp1 = extractvalue { <16 x i8>, <16 x i8> } %tmp0, 0
  %tmp2 = getelementptr i8* %A, i32 %inc
  store i8* %tmp2, i8** %ptr
  ret <16 x i8> %tmp1
}

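; Check for a post-increment updating load of a three-element structure.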
define <4 x i32> @test_vld3_fx_update(i32** %ptr) nounwind {
; CHECK: test_vld3_fx_update
; CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], #48
  %A = load i32** %ptr
  %tmp0 = bitcast i32* %A to i8*
  %tmp1 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32(i8* %tmp0, i32 4)
  %tmp2 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %tmp1, 0
  %tmp3 = getelementptr i32* %A, i32 12
  store i32* %tmp3, i32** %ptr
  ret <4 x i32> %tmp2
}

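; Check for a post-increment updating load of a three-element structure with register increment.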
define <4 x i16> @test_vld3_reg_update(i16** %ptr, i32 %inc) nounwind {
; CHECK: test_vld3_reg_update
; CHECK: ld3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
  %A = load i16** %ptr
  %tmp0 = bitcast i16* %A to i8*
  %tmp1 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8* %tmp0, i32 2)
  %tmp2 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %tmp1, 0
  %tmp3 = getelementptr i16* %A, i32 %inc
  store i16* %tmp3, i16** %ptr
  ret <4 x i16> %tmp2
}

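; Check for a post-increment updating load of a four-element structure.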
define <8 x i16> @test_vld4_fx_update(i16** %ptr) nounwind {
; CHECK: test_vld4_fx_update
; CHECK: ld4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], #64
  %A = load i16** %ptr
  %tmp0 = bitcast i16* %A to i8*
  %tmp1 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4.v8i16(i8* %tmp0, i32 8)
  %tmp2 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %tmp1, 0
  %tmp3 = getelementptr i16* %A, i32 32
  store i16* %tmp3, i16** %ptr
  ret <8 x i16> %tmp2
}

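; Check for a post-increment updating load of a four-element structure with register increment.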
define <8 x i8> @test_vld4_reg_update(i8** %ptr, i32 %inc) nounwind {
; CHECK: test_vld4_reg_update
; CHECK: ld4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
  %A = load i8** %ptr
  %tmp0 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8* %A, i32 1)
  %tmp1 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %tmp0, 0
  %tmp2 = getelementptr i8* %A, i32 %inc
  store i8* %tmp2, i8** %ptr
  ret <8 x i8> %tmp1
}

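; Check for a post-increment updating store.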
define void @test_vst1_fx_update(float** %ptr, <2 x float> %B) nounwind {
; CHECK: test_vst1_fx_update
; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}], #8
  %A = load float** %ptr
  %tmp0 = bitcast float* %A to i8*
  call void @llvm.arm.neon.vst1.v2f32(i8* %tmp0, <2 x float> %B, i32 4)
  %tmp2 = getelementptr float* %A, i32 2
  store float* %tmp2, float** %ptr
  ret void
}

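; Check for a post-increment updating store with register increment.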
define void @test_vst1_reg_update(i16** %ptr, <8 x i16> %B, i32 %inc) nounwind {
; CHECK: test_vst1_reg_update
; CHECK: st1 {v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}], x{{[0-9]+}}
  %A = load i16** %ptr
  %tmp0 = bitcast i16* %A to i8*
  call void @llvm.arm.neon.vst1.v8i16(i8* %tmp0, <8 x i16> %B, i32 2)
  %tmp1 = getelementptr i16* %A, i32 %inc
  store i16* %tmp1, i16** %ptr
  ret void
}

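; Check for a post-increment updating store of a two-element structure.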
define void @test_vst2_fx_update(i64** %ptr, <1 x i64> %B) nounwind {
; CHECK: test_vst2_fx_update
; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}], #16
  %A = load i64** %ptr
  %tmp0 = bitcast i64* %A to i8*
  call void @llvm.arm.neon.vst2.v1i64(i8* %tmp0, <1 x i64> %B, <1 x i64> %B, i32 8)
  %tmp1 = getelementptr i64* %A, i32 2
  store i64* %tmp1, i64** %ptr
  ret void
}

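; Check for a post-increment updating store of a two-element structure with register increment.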
define void @test_vst2_reg_update(i8** %ptr, <8 x i8> %B, i32 %inc) nounwind {
; CHECK: test_vst2_reg_update
; CHECK: st2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}], x{{[0-9]+}}
  %A = load i8** %ptr
  call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %B, <8 x i8> %B, i32 4)
  %tmp0 = getelementptr i8* %A, i32 %inc
  store i8* %tmp0, i8** %ptr
  ret void
}

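; Check for a post-increment updating store of a three-element structure.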
define void @test_vst3_fx_update(i32** %ptr, <2 x i32> %B) nounwind {
; CHECK: test_vst3_fx_update
; CHECK: st3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}], #24
  %A = load i32** %ptr
  %tmp0 = bitcast i32* %A to i8*
  call void @llvm.arm.neon.vst3.v2i32(i8* %tmp0, <2 x i32> %B, <2 x i32> %B, <2 x i32> %B, i32 4)
  %tmp1 = getelementptr i32* %A, i32 6
  store i32* %tmp1, i32** %ptr
  ret void
}

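; Check for a post-increment updating store of a three-element structure with register increment.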
define void @test_vst3_reg_update(i16** %ptr, <8 x i16> %B, i32 %inc) nounwind {
; CHECK: test_vst3_reg_update
; CHECK: st3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}], x{{[0-9]+}}
  %A = load i16** %ptr
  %tmp0 = bitcast i16* %A to i8*
  call void @llvm.arm.neon.vst3.v8i16(i8* %tmp0, <8 x i16> %B, <8 x i16> %B, <8 x i16> %B, i32 2)
  %tmp1 = getelementptr i16* %A, i32 %inc
  store i16* %tmp1, i16** %ptr
  ret void
}

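; Check for a post-increment updating store of a four-element structure.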
define void @test_vst4_fx_update(float** %ptr, <4 x float> %B) nounwind {
; CHECK: test_vst4_fx_update
; CHECK: st4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}], #64
  %A = load float** %ptr
  %tmp0 = bitcast float* %A to i8*
  call void @llvm.arm.neon.vst4.v4f32(i8* %tmp0, <4 x float> %B, <4 x float> %B, <4 x float> %B, <4 x float> %B, i32 4)
  %tmp1 = getelementptr float* %A, i32 16
  store float* %tmp1, float** %ptr
  ret void
}

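; Check for a post-increment updating store of a four-element structure with register increment.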
define void @test_vst4_reg_update(i8** %ptr, <8 x i8> %B, i32 %inc) nounwind {
; CHECK: test_vst4_reg_update
; CHECK: st4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}], x{{[0-9]+}}
  %A = load i8** %ptr
  call void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %B, <8 x i8> %B, <8 x i8> %B, <8 x i8> %B, i32 1)
  %tmp0 = getelementptr i8* %A, i32 %inc
  store i8* %tmp0, i8** %ptr
  ret void
}

declare <4 x i16> @llvm.arm.neon.vld1.v4i16(i8*, i32)
declare <2 x i32> @llvm.arm.neon.vld1.v2i32(i8*, i32)
declare { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8*, i32)
declare { <2 x float>, <2 x float> } @llvm.arm.neon.vld2.v2f32(i8*, i32)
declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8*, i32)
declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32(i8*, i32)
declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4.v8i16(i8*, i32)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8*, i32)

declare void @llvm.arm.neon.vst1.v2f32(i8*, <2 x float>, i32)
declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32)
declare void @llvm.arm.neon.vst2.v1i64(i8*, <1 x i64>, <1 x i64>, i32)
declare void @llvm.arm.neon.vst2.v8i8(i8*, <8 x i8>, <8 x i8>, i32)
declare void @llvm.arm.neon.vst3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32)
declare void @llvm.arm.neon.vst3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32)
declare void @llvm.arm.neon.vst4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32)
declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32)

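; Check for a post-increment updating vld1x2 load of two consecutive registers.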
define <16 x i8> @test_vld1x2_fx_update(i8* %a, i8** %ptr) {
; CHECK: test_vld1x2_fx_update
; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], #32
  %1 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8* %a, i32 1)
  %2 = extractvalue { <16 x i8>, <16 x i8> } %1, 0
  %tmp1 = getelementptr i8* %a, i32 32
  store i8* %tmp1, i8** %ptr
  ret <16 x i8> %2
}

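; Check for a post-increment updating vld1x2 load with register increment.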
define <8 x i16> @test_vld1x2_reg_update(i16* %a, i16** %ptr, i32 %inc) {
; CHECK: test_vld1x2_reg_update
; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
  %1 = bitcast i16* %a to i8*
  %2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8* %1, i32 2)
  %3 = extractvalue { <8 x i16>, <8 x i16> } %2, 0
  %tmp1 = getelementptr i16* %a, i32 %inc
  store i16* %tmp1, i16** %ptr
  ret <8 x i16> %3
}

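; Check for a post-increment updating vld1x3 load of three consecutive registers.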
define <2 x i64> @test_vld1x3_fx_update(i64* %a, i64** %ptr) {
; CHECK: test_vld1x3_fx_update
; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}], #48
  %1 = bitcast i64* %a to i8*
  %2 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8* %1, i32 8)
  %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 0
  %tmp1 = getelementptr i64* %a, i32 6
  store i64* %tmp1, i64** %ptr
  ret <2 x i64> %3
}

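; Check for a post-increment updating vld1x3 load with register increment.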
define <8 x i16> @test_vld1x3_reg_update(i16* %a, i16** %ptr, i32 %inc) {
; CHECK: test_vld1x3_reg_update
; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
  %1 = bitcast i16* %a to i8*
  %2 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8* %1, i32 2)
  %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 0
  %tmp1 = getelementptr i16* %a, i32 %inc
  store i16* %tmp1, i16** %ptr
  ret <8 x i16> %3
}

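; Check for a post-increment updating vld1x4 load of four consecutive registers.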
define <4 x float> @test_vld1x4_fx_update(float* %a, float** %ptr) {
; CHECK: test_vld1x4_fx_update
; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], #64
  %1 = bitcast float* %a to i8*
  %2 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8* %1, i32 4)
  %3 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 0
  %tmp1 = getelementptr float* %a, i32 16
  store float* %tmp1, float** %ptr
  ret <4 x float> %3
}

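; Check for a post-increment updating vld1x4 load with register increment.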
define <8 x i8> @test_vld1x4_reg_update(i8* readonly %a, i8** %ptr, i32 %inc) #0 {
; CHECK: test_vld1x4_reg_update
; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
  %1 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8* %a, i32 1)
  %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
  %tmp1 = getelementptr i8* %a, i32 %inc
  store i8* %tmp1, i8** %ptr
  ret <8 x i8> %2
}

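; Check for a post-increment updating vst1x2 store of two consecutive registers.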
define void @test_vst1x2_fx_update(i8* %a, [2 x <16 x i8>] %b.coerce, i8** %ptr) #2 {
; CHECK: test_vst1x2_fx_update
; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], #32
  %1 = extractvalue [2 x <16 x i8>] %b.coerce, 0
  %2 = extractvalue [2 x <16 x i8>] %b.coerce, 1
  tail call void @llvm.aarch64.neon.vst1x2.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, i32 1)
  %tmp1 = getelementptr i8* %a, i32 32
  store i8* %tmp1, i8** %ptr
  ret void
}

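; Check for a post-increment updating vst1x2 store with register increment.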
define void @test_vst1x2_reg_update(i16* %a, [2 x <8 x i16>] %b.coerce, i16** %ptr, i32 %inc) #2 {
; CHECK: test_vst1x2_reg_update
; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
  %1 = extractvalue [2 x <8 x i16>] %b.coerce, 0
  %2 = extractvalue [2 x <8 x i16>] %b.coerce, 1
  %3 = bitcast i16* %a to i8*
  tail call void @llvm.aarch64.neon.vst1x2.v8i16(i8* %3, <8 x i16> %1, <8 x i16> %2, i32 2)
  %tmp1 = getelementptr i16* %a, i32 %inc
  store i16* %tmp1, i16** %ptr
  ret void
}

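; Check for a post-increment updating vst1x3 store of three consecutive registers.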
define void @test_vst1x3_fx_update(i32* %a, [3 x <2 x i32>] %b.coerce, i32** %ptr) #2 {
; CHECK: test_vst1x3_fx_update
; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], #24
  %1 = extractvalue [3 x <2 x i32>] %b.coerce, 0
  %2 = extractvalue [3 x <2 x i32>] %b.coerce, 1
  %3 = extractvalue [3 x <2 x i32>] %b.coerce, 2
  %4 = bitcast i32* %a to i8*
  tail call void @llvm.aarch64.neon.vst1x3.v2i32(i8* %4, <2 x i32> %1, <2 x i32> %2, <2 x i32> %3, i32 4)
  %tmp1 = getelementptr i32* %a, i32 6
  store i32* %tmp1, i32** %ptr
  ret void
}

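; Check for a post-increment updating vst1x3 store with register increment.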
define void @test_vst1x3_reg_update(i64* %a, [3 x <1 x i64>] %b.coerce, i64** %ptr, i32 %inc) #2 {
; CHECK: test_vst1x3_reg_update
; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
  %1 = extractvalue [3 x <1 x i64>] %b.coerce, 0
  %2 = extractvalue [3 x <1 x i64>] %b.coerce, 1
  %3 = extractvalue [3 x <1 x i64>] %b.coerce, 2
  %4 = bitcast i64* %a to i8*
  tail call void @llvm.aarch64.neon.vst1x3.v1i64(i8* %4, <1 x i64> %1, <1 x i64> %2, <1 x i64> %3, i32 8)
  %tmp1 = getelementptr i64* %a, i32 %inc
  store i64* %tmp1, i64** %ptr
  ret void
}

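; Check for a post-increment updating vst1x4 store of four consecutive registers.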
define void @test_vst1x4_fx_update(float* %a, [4 x <4 x float>] %b.coerce, float** %ptr) #2 {
; CHECK: test_vst1x4_fx_update
; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], #64
  %1 = extractvalue [4 x <4 x float>] %b.coerce, 0
  %2 = extractvalue [4 x <4 x float>] %b.coerce, 1
  %3 = extractvalue [4 x <4 x float>] %b.coerce, 2
  %4 = extractvalue [4 x <4 x float>] %b.coerce, 3
  %5 = bitcast float* %a to i8*
  tail call void @llvm.aarch64.neon.vst1x4.v4f32(i8* %5, <4 x float> %1, <4 x float> %2, <4 x float> %3, <4 x float> %4, i32 4)
  %tmp1 = getelementptr float* %a, i32 16
  store float* %tmp1, float** %ptr
  ret void
}

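; Check for a post-increment updating vst1x4 store with register increment.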
define void @test_vst1x4_reg_update(double* %a, [4 x <2 x double>] %b.coerce, double** %ptr, i32 %inc) #2 {
; CHECK: test_vst1x4_reg_update
; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
  %1 = extractvalue [4 x <2 x double>] %b.coerce, 0
  %2 = extractvalue [4 x <2 x double>] %b.coerce, 1
  %3 = extractvalue [4 x <2 x double>] %b.coerce, 2
  %4 = extractvalue [4 x <2 x double>] %b.coerce, 3
  %5 = bitcast double* %a to i8*
  tail call void @llvm.aarch64.neon.vst1x4.v2f64(i8* %5, <2 x double> %1, <2 x double> %2, <2 x double> %3, <2 x double> %4, i32 8)
  %tmp1 = getelementptr double* %a, i32 %inc
  store double* %tmp1, double** %ptr
  ret void
}

declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8*, i32)
declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8*, i32)
declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8*, i32)
declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8*, i32)
declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8*, i32)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8*, i32)
declare void @llvm.aarch64.neon.vst1x2.v16i8(i8*, <16 x i8>, <16 x i8>, i32)
declare void @llvm.aarch64.neon.vst1x2.v8i16(i8*, <8 x i16>, <8 x i16>, i32)
declare void @llvm.aarch64.neon.vst1x3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32)
declare void @llvm.aarch64.neon.vst1x3.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32)
declare void @llvm.aarch64.neon.vst1x4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) #3
declare void @llvm.aarch64.neon.vst1x4.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32) #3