; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=knl < %s | FileCheck %s -check-prefix=AVX512
; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=core-avx2 < %s | FileCheck %s -check-prefix=AVX2
; RUN: opt -mtriple=x86_64-apple-darwin -codegenprepare -mcpu=corei7-avx -S < %s | FileCheck %s -check-prefix=AVX_SCALAR
; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=skx < %s | FileCheck %s -check-prefix=SKX
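
; Lowering of the llvm.masked.load/store intrinsics: AVX-512 (KNL), AVX2 and SKX
; codegen, plus the CodeGenPrepare run (AVX_SCALAR) that scalarizes the intrinsics
; when they are not legal for the target.
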
; AVX512: vmovdqu32 (%rdi), %zmm0 {%k1} {z}
; AVX2: vpmaskmovd {{.*}}(%rdi)
; AVX2: vpmaskmovd {{.*}}(%rdi)
; AVX_SCALAR-LABEL: test1
; AVX_SCALAR-NOT: masked
; AVX_SCALAR: extractelement
; AVX_SCALAR: insertelement
; AVX_SCALAR: extractelement
; AVX_SCALAR: insertelement
define <16 x i32> @test1(<16 x i32> %trigger, <16 x i32>* %addr) {
  %mask = icmp eq <16 x i32> %trigger, zeroinitializer
  %res = call <16 x i32> @llvm.masked.load.v16i32(<16 x i32>* %addr, i32 4, <16 x i1>%mask, <16 x i32>undef)
  ret <16 x i32> %res
}

; AVX512: vmovdqu32 (%rdi), %zmm0 {%k1} {z}
; AVX2: vpmaskmovd {{.*}}(%rdi)
; AVX2: vpmaskmovd {{.*}}(%rdi)
define <16 x i32> @test2(<16 x i32> %trigger, <16 x i32>* %addr) {
  %mask = icmp eq <16 x i32> %trigger, zeroinitializer
  %res = call <16 x i32> @llvm.masked.load.v16i32(<16 x i32>* %addr, i32 4, <16 x i1>%mask, <16 x i32>zeroinitializer)
  ret <16 x i32> %res
}

; AVX512: vmovdqu32 %zmm1, (%rdi) {%k1}
; AVX_SCALAR-LABEL: test3
; AVX_SCALAR-NOT: masked
; AVX_SCALAR: extractelement
; AVX_SCALAR: extractelement
; AVX_SCALAR: extractelement
define void @test3(<16 x i32> %trigger, <16 x i32>* %addr, <16 x i32> %val) {
  %mask = icmp eq <16 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v16i32(<16 x i32>%val, <16 x i32>* %addr, i32 4, <16 x i1>%mask)
  ret void
}

; AVX512: vmovups (%rdi), %zmm{{.*{%k[1-7]}}}
; AVX2: vmaskmovps {{.*}}(%rdi)
; AVX2: vmaskmovps {{.*}}(%rdi)
define <16 x float> @test4(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %dst) {
  %mask = icmp eq <16 x i32> %trigger, zeroinitializer
  %res = call <16 x float> @llvm.masked.load.v16f32(<16 x float>* %addr, i32 4, <16 x i1>%mask, <16 x float> %dst)
  ret <16 x float> %res
}

; AVX512: vmovupd (%rdi), %zmm1 {%k1}
define <8 x double> @test5(<8 x i32> %trigger, <8 x double>* %addr, <8 x double> %dst) {
  %mask = icmp eq <8 x i32> %trigger, zeroinitializer
  %res = call <8 x double> @llvm.masked.load.v8f64(<8 x double>* %addr, i32 4, <8 x i1>%mask, <8 x double>%dst)
  ret <8 x double> %res
}

; SKX: vmovupd {{.*}}{%k1}
define <2 x double> @test6(<2 x i64> %trigger, <2 x double>* %addr, <2 x double> %dst) {
  %mask = icmp eq <2 x i64> %trigger, zeroinitializer
  %res = call <2 x double> @llvm.masked.load.v2f64(<2 x double>* %addr, i32 4, <2 x i1>%mask, <2 x double>%dst)
  ret <2 x double> %res
}

; AVX2: vmaskmovps {{.*}}(%rdi)
; SKX: vmovups (%rdi){{.*}}{%k1}
define <4 x float> @test7(<4 x i32> %trigger, <4 x float>* %addr, <4 x float> %dst) {
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  %res = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %addr, i32 4, <4 x i1>%mask, <4 x float>%dst)
  ret <4 x float> %res
}

; AVX2: vpmaskmovd {{.*}}(%rdi)
; SKX: vmovdqu32 (%rdi){{.*}}{%k1}
define <4 x i32> @test8(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %dst) {
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  %res = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %addr, i32 4, <4 x i1>%mask, <4 x i32>%dst)
  ret <4 x i32> %res
}

; AVX2: vpmaskmovd %xmm
; SKX: vmovdqu32 %xmm{{.*}}{%k1}
define void @test9(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v4i32(<4 x i32>%val, <4 x i32>* %addr, i32 4, <4 x i1>%mask)
  ret void
}

; AVX2: vmaskmovpd (%rdi), %ymm
; SKX: vmovapd {{.*}}{%k1}
define <4 x double> @test10(<4 x i32> %trigger, <4 x double>* %addr, <4 x double> %dst) {
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  %res = call <4 x double> @llvm.masked.load.v4f64(<4 x double>* %addr, i32 32, <4 x i1>%mask, <4 x double>%dst)
  ret <4 x double> %res
}

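; For the test11* cases, a v8i1 mask on KNL (no AVX512VL) is zero-extended into a
; 16-bit k-register (kshiftlw/kshiftrw by 8) and the memory access is widened to
; 512 bits, while SKX can use the masked 256-bit instruction directly.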
; AVX2-LABEL: test11a
; SKX: vmovaps (%rdi), %ymm1 {%k1}
; AVX512-LABEL: test11a
; AVX512: kshiftlw $8
; AVX512: kshiftrw $8
; AVX512: vmovups (%rdi), %zmm1 {%k1}
define <8 x float> @test11a(<8 x i32> %trigger, <8 x float>* %addr, <8 x float> %dst) {
  %mask = icmp eq <8 x i32> %trigger, zeroinitializer
  %res = call <8 x float> @llvm.masked.load.v8f32(<8 x float>* %addr, i32 32, <8 x i1>%mask, <8 x float>%dst)
  ret <8 x float> %res
}

; SKX: vmovdqu32 (%rdi), %ymm1 {%k1}
; AVX512-LABEL: test11b
; AVX512: kshiftlw $8
; AVX512: kshiftrw $8
; AVX512: vmovdqu32 (%rdi), %zmm1 {%k1}
define <8 x i32> @test11b(<8 x i1> %mask, <8 x i32>* %addr, <8 x i32> %dst) {
  %res = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %addr, i32 4, <8 x i1>%mask, <8 x i32>%dst)
  ret <8 x i32> %res
}

; SKX: vmovaps (%rdi), %ymm0 {%k1} {z}
; AVX512-LABEL: test11c
; AVX512: kshiftlw $8
; AVX512: kshiftrw $8
; AVX512: vmovups (%rdi), %zmm0 {%k1} {z}
define <8 x float> @test11c(<8 x i1> %mask, <8 x float>* %addr) {
  %res = call <8 x float> @llvm.masked.load.v8f32(<8 x float>* %addr, i32 32, <8 x i1> %mask, <8 x float> zeroinitializer)
  ret <8 x float> %res
}

; SKX: vmovdqu32 (%rdi), %ymm0 {%k1} {z}
; AVX512-LABEL: test11d
; AVX512: kshiftlw $8
; AVX512: kshiftrw $8
; AVX512: vmovdqu32 (%rdi), %zmm0 {%k1} {z}
define <8 x i32> @test11d(<8 x i1> %mask, <8 x i32>* %addr) {
  %res = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %addr, i32 4, <8 x i1> %mask, <8 x i32> zeroinitializer)
  ret <8 x i32> %res
}

; AVX2: vpmaskmovd %ymm
; SKX: vmovdqu32 {{.*}}{%k1}
define void @test12(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %val) {
  %mask = icmp eq <8 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v8i32(<8 x i32>%val, <8 x i32>* %addr, i32 4, <8 x i1>%mask)
  ret void
}

; AVX512-LABEL: test13
; AVX512: vmovups %zmm1, (%rdi) {%k1}
define void @test13(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %val) {
  %mask = icmp eq <16 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v16f32(<16 x float>%val, <16 x float>* %addr, i32 4, <16 x i1>%mask)
  ret void
}

; SKX: vmovups {{.*}}{%k1}
define void @test14(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %val) {
  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v2f32(<2 x float>%val, <2 x float>* %addr, i32 4, <2 x i1>%mask)
  ret void
}

; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k1
; SKX-NEXT: vpmovqd %xmm1, (%rdi) {%k1}
define void @test15(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %val) {
  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v2i32(<2 x i32>%val, <2 x i32>* %addr, i32 4, <2 x i1>%mask)
  ret void
}

; SKX: vmovups {{.*}}{%k1}
define <2 x float> @test16(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %dst) {
  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
  %res = call <2 x float> @llvm.masked.load.v2f32(<2 x float>* %addr, i32 4, <2 x i1>%mask, <2 x float>%dst)
  ret <2 x float> %res
}

; SKX: vmovdqu32 {{.*}}{%k1}
define <2 x i32> @test17(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %dst) {
  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
  %res = call <2 x i32> @llvm.masked.load.v2i32(<2 x i32>* %addr, i32 4, <2 x i1>%mask, <2 x i32>%dst)
  ret <2 x i32> %res
}

define <2 x float> @test18(<2 x i32> %trigger, <2 x float>* %addr) {
  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
  %res = call <2 x float> @llvm.masked.load.v2f32(<2 x float>* %addr, i32 4, <2 x i1>%mask, <2 x float>undef)
  ret <2 x float> %res
}

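; test19: the mask is all ones, so CodeGenPrepare folds the masked load into a plain
; vector load.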
; AVX_SCALAR-LABEL: test19
; AVX_SCALAR: load <4 x float>, <4 x float>* %addr, align 4
define <4 x float> @test19(<4 x i32> %trigger, <4 x float>* %addr) {
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  %res = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %addr, i32 4, <4 x i1><i1 true, i1 true, i1 true, i1 true>, <4 x float>undef)
  ret <4 x float> %res
}

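; test20: a constant mask with a known-off lane loads only the enabled elements and
; blends them with the pass-through value %src0.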
; AVX_SCALAR-LABEL: test20
; AVX_SCALAR: load float, {{.*}}, align 4
; AVX_SCALAR: insertelement <4 x float> undef, float
; AVX_SCALAR: select <4 x i1> <i1 true, i1 false, i1 true, i1 true>
define <4 x float> @test20(<4 x i32> %trigger, <4 x float>* %addr, <4 x float> %src0) {
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  %res = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %addr, i32 16, <4 x i1><i1 true, i1 false, i1 true, i1 true>, <4 x float> %src0)
  ret <4 x float> %res
}

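; test21: an all-ones mask turns the masked store into a regular vector store.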
; AVX_SCALAR-LABEL: test21
; AVX_SCALAR: store <4 x i32> %val
define void @test21(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v4i32(<4 x i32>%val, <4 x i32>* %addr, i32 4, <4 x i1><i1 true, i1 true, i1 true, i1 true>)
  ret void
}

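; test22: with only the first lane enabled, the masked store is reduced to a scalar
; store of element 0.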
; AVX_SCALAR-LABEL: test22
; AVX_SCALAR: extractelement <4 x i32> %val, i32 0
; AVX_SCALAR: store i32
define void @test22(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v4i32(<4 x i32>%val, <4 x i32>* %addr, i32 4, <4 x i1><i1 true, i1 false, i1 false, i1 false>)
  ret void
}

declare <16 x i32> @llvm.masked.load.v16i32(<16 x i32>*, i32, <16 x i1>, <16 x i32>)
declare <4 x i32> @llvm.masked.load.v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
declare <2 x i32> @llvm.masked.load.v2i32(<2 x i32>*, i32, <2 x i1>, <2 x i32>)
declare void @llvm.masked.store.v16i32(<16 x i32>, <16 x i32>*, i32, <16 x i1>)
declare void @llvm.masked.store.v8i32(<8 x i32>, <8 x i32>*, i32, <8 x i1>)
declare void @llvm.masked.store.v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
declare void @llvm.masked.store.v2f32(<2 x float>, <2 x float>*, i32, <2 x i1>)
declare void @llvm.masked.store.v2i32(<2 x i32>, <2 x i32>*, i32, <2 x i1>)
declare void @llvm.masked.store.v16f32(<16 x float>, <16 x float>*, i32, <16 x i1>)
declare void @llvm.masked.store.v16f32p(<16 x float>*, <16 x float>**, i32, <16 x i1>)
declare <16 x float> @llvm.masked.load.v16f32(<16 x float>*, i32, <16 x i1>, <16 x float>)
declare <8 x float> @llvm.masked.load.v8f32(<8 x float>*, i32, <8 x i1>, <8 x float>)
declare <8 x i32> @llvm.masked.load.v8i32(<8 x i32>*, i32, <8 x i1>, <8 x i32>)
declare <4 x float> @llvm.masked.load.v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
declare <2 x float> @llvm.masked.load.v2f32(<2 x float>*, i32, <2 x i1>, <2 x float>)
declare <8 x double> @llvm.masked.load.v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>)
declare <4 x double> @llvm.masked.load.v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>)
declare <2 x double> @llvm.masked.load.v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)
declare void @llvm.masked.store.v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>)
declare void @llvm.masked.store.v2f64(<2 x double>, <2 x double>*, i32, <2 x i1>)
declare void @llvm.masked.store.v2i64(<2 x i64>, <2 x i64>*, i32, <2 x i1>)

declare <16 x i32*> @llvm.masked.load.v16p0i32(<16 x i32*>*, i32, <16 x i1>, <16 x i32*>)

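; Loads of pointer vectors are legalized as i64 elements and split across two masked
; 512-bit loads.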
; AVX512-LABEL: test23
; AVX512: vmovdqu64 64(%rdi), %zmm1 {%k2} {z}
; AVX512: vmovdqu64 (%rdi), %zmm0 {%k1} {z}
define <16 x i32*> @test23(<16 x i32*> %trigger, <16 x i32*>* %addr) {
  %mask = icmp eq <16 x i32*> %trigger, zeroinitializer
  %res = call <16 x i32*> @llvm.masked.load.v16p0i32(<16 x i32*>* %addr, i32 4, <16 x i1>%mask, <16 x i32*>zeroinitializer)
  ret <16 x i32*> %res
}

%mystruct = type { i16, i16, [1 x i8*] }

declare <16 x %mystruct*> @llvm.masked.load.v16p0mystruct(<16 x %mystruct*>*, i32, <16 x i1>, <16 x %mystruct*>)

; AVX512-LABEL: test24
; AVX512: vmovdqu64 (%rdi), %zmm0 {%k1} {z}
; AVX512: kshiftrw $8, %k1, %k1
; AVX512: vmovdqu64 64(%rdi), %zmm1 {%k1} {z}
define <16 x %mystruct*> @test24(<16 x i1> %mask, <16 x %mystruct*>* %addr) {
  %res = call <16 x %mystruct*> @llvm.masked.load.v16p0mystruct(<16 x %mystruct*>* %addr, i32 4, <16 x i1>%mask, <16 x %mystruct*>zeroinitializer)
  ret <16 x %mystruct*> %res
}