1 // Bitcasts between 512-bit vector types. Return the original type since
2 // no instruction is needed for the conversion
// All (dst, src) type pairs over {v8f64, v16f32, v8i64, v16i32} are listed
// once; each pattern is a no-op re-type of the same VR512 register.
3 let Predicates = [HasAVX512] in {
4 def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
5 def : Pat<(v8f64 (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
6 def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
7 def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
8 def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))), (v16f32 VR512:$src)>;
9 def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))), (v16f32 VR512:$src)>;
10 def : Pat<(v8i64 (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
11 def : Pat<(v8i64 (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
12 def : Pat<(v8i64 (bitconvert (v8f64 VR512:$src))), (v8i64 VR512:$src)>;
13 def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
14 def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))), (v16i32 VR512:$src)>;
15 def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))), (v16i32 VR512:$src)>;
// Fixed: the v8f64 <- v8i64 bitconvert pattern appeared a second time here;
// it is already defined above (see the 0x...6 entry), so the exact duplicate
// has been removed.
// Bitcasts between 128-bit vector types (VR128X). As above, these fold away
// at selection time; no instruction is emitted for the conversion.
18 def : Pat<(v2i64 (bitconvert (v4i32 VR128X:$src))), (v2i64 VR128X:$src)>;
19 def : Pat<(v2i64 (bitconvert (v8i16 VR128X:$src))), (v2i64 VR128X:$src)>;
20 def : Pat<(v2i64 (bitconvert (v16i8 VR128X:$src))), (v2i64 VR128X:$src)>;
21 def : Pat<(v2i64 (bitconvert (v2f64 VR128X:$src))), (v2i64 VR128X:$src)>;
22 def : Pat<(v2i64 (bitconvert (v4f32 VR128X:$src))), (v2i64 VR128X:$src)>;
23 def : Pat<(v4i32 (bitconvert (v2i64 VR128X:$src))), (v4i32 VR128X:$src)>;
24 def : Pat<(v4i32 (bitconvert (v8i16 VR128X:$src))), (v4i32 VR128X:$src)>;
25 def : Pat<(v4i32 (bitconvert (v16i8 VR128X:$src))), (v4i32 VR128X:$src)>;
26 def : Pat<(v4i32 (bitconvert (v2f64 VR128X:$src))), (v4i32 VR128X:$src)>;
27 def : Pat<(v4i32 (bitconvert (v4f32 VR128X:$src))), (v4i32 VR128X:$src)>;
28 def : Pat<(v8i16 (bitconvert (v2i64 VR128X:$src))), (v8i16 VR128X:$src)>;
29 def : Pat<(v8i16 (bitconvert (v4i32 VR128X:$src))), (v8i16 VR128X:$src)>;
30 def : Pat<(v8i16 (bitconvert (v16i8 VR128X:$src))), (v8i16 VR128X:$src)>;
31 def : Pat<(v8i16 (bitconvert (v2f64 VR128X:$src))), (v8i16 VR128X:$src)>;
32 def : Pat<(v8i16 (bitconvert (v4f32 VR128X:$src))), (v8i16 VR128X:$src)>;
33 def : Pat<(v16i8 (bitconvert (v2i64 VR128X:$src))), (v16i8 VR128X:$src)>;
34 def : Pat<(v16i8 (bitconvert (v4i32 VR128X:$src))), (v16i8 VR128X:$src)>;
35 def : Pat<(v16i8 (bitconvert (v8i16 VR128X:$src))), (v16i8 VR128X:$src)>;
36 def : Pat<(v16i8 (bitconvert (v2f64 VR128X:$src))), (v16i8 VR128X:$src)>;
37 def : Pat<(v16i8 (bitconvert (v4f32 VR128X:$src))), (v16i8 VR128X:$src)>;
38 def : Pat<(v4f32 (bitconvert (v2i64 VR128X:$src))), (v4f32 VR128X:$src)>;
39 def : Pat<(v4f32 (bitconvert (v4i32 VR128X:$src))), (v4f32 VR128X:$src)>;
40 def : Pat<(v4f32 (bitconvert (v8i16 VR128X:$src))), (v4f32 VR128X:$src)>;
41 def : Pat<(v4f32 (bitconvert (v16i8 VR128X:$src))), (v4f32 VR128X:$src)>;
42 def : Pat<(v4f32 (bitconvert (v2f64 VR128X:$src))), (v4f32 VR128X:$src)>;
43 def : Pat<(v2f64 (bitconvert (v2i64 VR128X:$src))), (v2f64 VR128X:$src)>;
44 def : Pat<(v2f64 (bitconvert (v4i32 VR128X:$src))), (v2f64 VR128X:$src)>;
45 def : Pat<(v2f64 (bitconvert (v8i16 VR128X:$src))), (v2f64 VR128X:$src)>;
46 def : Pat<(v2f64 (bitconvert (v16i8 VR128X:$src))), (v2f64 VR128X:$src)>;
47 def : Pat<(v2f64 (bitconvert (v4f32 VR128X:$src))), (v2f64 VR128X:$src)>;
49 // Bitcasts between 256-bit vector types. Return the original type since
50 // no instruction is needed for the conversion
// Every ordered (dst, src) pair over the VR256X element types is covered.
51 def : Pat<(v4f64 (bitconvert (v8f32 VR256X:$src))), (v4f64 VR256X:$src)>;
52 def : Pat<(v4f64 (bitconvert (v8i32 VR256X:$src))), (v4f64 VR256X:$src)>;
53 def : Pat<(v4f64 (bitconvert (v4i64 VR256X:$src))), (v4f64 VR256X:$src)>;
54 def : Pat<(v4f64 (bitconvert (v16i16 VR256X:$src))), (v4f64 VR256X:$src)>;
55 def : Pat<(v4f64 (bitconvert (v32i8 VR256X:$src))), (v4f64 VR256X:$src)>;
56 def : Pat<(v8f32 (bitconvert (v8i32 VR256X:$src))), (v8f32 VR256X:$src)>;
57 def : Pat<(v8f32 (bitconvert (v4i64 VR256X:$src))), (v8f32 VR256X:$src)>;
58 def : Pat<(v8f32 (bitconvert (v4f64 VR256X:$src))), (v8f32 VR256X:$src)>;
59 def : Pat<(v8f32 (bitconvert (v32i8 VR256X:$src))), (v8f32 VR256X:$src)>;
60 def : Pat<(v8f32 (bitconvert (v16i16 VR256X:$src))), (v8f32 VR256X:$src)>;
61 def : Pat<(v4i64 (bitconvert (v8f32 VR256X:$src))), (v4i64 VR256X:$src)>;
62 def : Pat<(v4i64 (bitconvert (v8i32 VR256X:$src))), (v4i64 VR256X:$src)>;
63 def : Pat<(v4i64 (bitconvert (v4f64 VR256X:$src))), (v4i64 VR256X:$src)>;
64 def : Pat<(v4i64 (bitconvert (v32i8 VR256X:$src))), (v4i64 VR256X:$src)>;
65 def : Pat<(v4i64 (bitconvert (v16i16 VR256X:$src))), (v4i64 VR256X:$src)>;
66 def : Pat<(v32i8 (bitconvert (v4f64 VR256X:$src))), (v32i8 VR256X:$src)>;
67 def : Pat<(v32i8 (bitconvert (v4i64 VR256X:$src))), (v32i8 VR256X:$src)>;
68 def : Pat<(v32i8 (bitconvert (v8f32 VR256X:$src))), (v32i8 VR256X:$src)>;
69 def : Pat<(v32i8 (bitconvert (v8i32 VR256X:$src))), (v32i8 VR256X:$src)>;
70 def : Pat<(v32i8 (bitconvert (v16i16 VR256X:$src))), (v32i8 VR256X:$src)>;
71 def : Pat<(v8i32 (bitconvert (v32i8 VR256X:$src))), (v8i32 VR256X:$src)>;
72 def : Pat<(v8i32 (bitconvert (v16i16 VR256X:$src))), (v8i32 VR256X:$src)>;
73 def : Pat<(v8i32 (bitconvert (v8f32 VR256X:$src))), (v8i32 VR256X:$src)>;
74 def : Pat<(v8i32 (bitconvert (v4i64 VR256X:$src))), (v8i32 VR256X:$src)>;
75 def : Pat<(v8i32 (bitconvert (v4f64 VR256X:$src))), (v8i32 VR256X:$src)>;
76 def : Pat<(v16i16 (bitconvert (v8f32 VR256X:$src))), (v16i16 VR256X:$src)>;
77 def : Pat<(v16i16 (bitconvert (v8i32 VR256X:$src))), (v16i16 VR256X:$src)>;
78 def : Pat<(v16i16 (bitconvert (v4i64 VR256X:$src))), (v16i16 VR256X:$src)>;
79 def : Pat<(v16i16 (bitconvert (v4f64 VR256X:$src))), (v16i16 VR256X:$src)>;
80 def : Pat<(v16i16 (bitconvert (v32i8 VR256X:$src))), (v16i16 VR256X:$src)>;
83 //===----------------------------------------------------------------------===//
84 // AVX-512 - VECTOR INSERT
// The instruction definitions below carry empty pattern lists ([]); the
// actual selection patterns are supplied separately after the definitions.
// -- 32x4 FP form --
87 let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
88 def VINSERTF32x4rr : AVX512AIi8<0x18, MRMSrcReg, (outs VR512:$dst),
89 (ins VR512:$src1, VR128X:$src2, i8imm:$src3),
90 "vinsertf32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
91 []>, EVEX_4V, EVEX_V512;
93 def VINSERTF32x4rm : AVX512AIi8<0x18, MRMSrcMem, (outs VR512:$dst),
94 (ins VR512:$src1, f128mem:$src2, i8imm:$src3),
95 "vinsertf32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
96 []>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VT4>;
// -- 64x4 FP form (VEX_W selects the 64-bit element encoding) --
100 let neverHasSideEffects = 1, ExeDomain = SSEPackedDouble in {
101 def VINSERTF64x4rr : AVX512AIi8<0x1a, MRMSrcReg, (outs VR512:$dst),
102 (ins VR512:$src1, VR256X:$src2, i8imm:$src3),
103 "vinsertf64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
104 []>, EVEX_4V, EVEX_V512, VEX_W;
106 def VINSERTF64x4rm : AVX512AIi8<0x1a, MRMSrcMem, (outs VR512:$dst),
107 (ins VR512:$src1, i256mem:$src2, i8imm:$src3),
108 "vinsertf64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
109 []>, EVEX_4V, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
111 // -- 32x4 integer form --
112 let neverHasSideEffects = 1 in {
113 def VINSERTI32x4rr : AVX512AIi8<0x38, MRMSrcReg, (outs VR512:$dst),
114 (ins VR512:$src1, VR128X:$src2, i8imm:$src3),
115 "vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
116 []>, EVEX_4V, EVEX_V512;
118 def VINSERTI32x4rm : AVX512AIi8<0x38, MRMSrcMem, (outs VR512:$dst),
119 (ins VR512:$src1, i128mem:$src2, i8imm:$src3),
120 "vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
121 []>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VT4>;
// -- 64x4 integer form --
125 let neverHasSideEffects = 1 in {
127 def VINSERTI64x4rr : AVX512AIi8<0x3a, MRMSrcReg, (outs VR512:$dst),
128 (ins VR512:$src1, VR256X:$src2, i8imm:$src3),
129 "vinserti64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
130 []>, EVEX_4V, EVEX_V512, VEX_W;
132 def VINSERTI64x4rm : AVX512AIi8<0x3a, MRMSrcMem, (outs VR512:$dst),
133 (ins VR512:$src1, i256mem:$src2, i8imm:$src3),
134 "vinserti64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
135 []>, EVEX_4V, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
// NOTE(review): the closing braces of the `let` blocks above are not visible
// in this copy — lines appear to have been dropped during extraction.
// Patterns: insert a 128-bit subvector into a 512-bit vector, register and
// memory source forms. INSERT_get_vinsert128_imm derives the lane immediate
// from the matched insert position.
138 def : Pat<(vinsert128_insert:$ins (v16f32 VR512:$src1), (v4f32 VR128X:$src2),
139 (iPTR imm)), (VINSERTF32x4rr VR512:$src1, VR128X:$src2,
140 (INSERT_get_vinsert128_imm VR512:$ins))>;
// NOTE(review): the v8f64/v2f64 and v8i64/v2i64 patterns reuse the 32x4
// instructions — presumably because no 64x2 insert form is defined in this
// file; confirm this is intentional (it changes only the execution domain).
141 def : Pat<(vinsert128_insert:$ins (v8f64 VR512:$src1), (v2f64 VR128X:$src2),
142 (iPTR imm)), (VINSERTF32x4rr VR512:$src1, VR128X:$src2,
143 (INSERT_get_vinsert128_imm VR512:$ins))>;
144 def : Pat<(vinsert128_insert:$ins (v8i64 VR512:$src1), (v2i64 VR128X:$src2),
145 (iPTR imm)), (VINSERTI32x4rr VR512:$src1, VR128X:$src2,
146 (INSERT_get_vinsert128_imm VR512:$ins))>;
147 def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1), (v4i32 VR128X:$src2),
148 (iPTR imm)), (VINSERTI32x4rr VR512:$src1, VR128X:$src2,
149 (INSERT_get_vinsert128_imm VR512:$ins))>;
// Memory-source variants of the same inserts.
151 def : Pat<(vinsert128_insert:$ins (v16f32 VR512:$src1), (loadv4f32 addr:$src2),
152 (iPTR imm)), (VINSERTF32x4rm VR512:$src1, addr:$src2,
153 (INSERT_get_vinsert128_imm VR512:$ins))>;
154 def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1),
155 (bc_v4i32 (loadv2i64 addr:$src2)),
156 (iPTR imm)), (VINSERTI32x4rm VR512:$src1, addr:$src2,
157 (INSERT_get_vinsert128_imm VR512:$ins))>;
158 def : Pat<(vinsert128_insert:$ins (v8f64 VR512:$src1), (loadv2f64 addr:$src2),
159 (iPTR imm)), (VINSERTF32x4rm VR512:$src1, addr:$src2,
160 (INSERT_get_vinsert128_imm VR512:$ins))>;
161 def : Pat<(vinsert128_insert:$ins (v8i64 VR512:$src1), (loadv2i64 addr:$src2),
162 (iPTR imm)), (VINSERTI32x4rm VR512:$src1, addr:$src2,
163 (INSERT_get_vinsert128_imm VR512:$ins))>;
// Patterns: insert a 256-bit subvector into a 512-bit vector (register
// source forms).
165 def : Pat<(vinsert256_insert:$ins (v16f32 VR512:$src1), (v8f32 VR256X:$src2),
166 (iPTR imm)), (VINSERTF64x4rr VR512:$src1, VR256X:$src2,
167 (INSERT_get_vinsert256_imm VR512:$ins))>;
168 def : Pat<(vinsert256_insert:$ins (v8f64 VR512:$src1), (v4f64 VR256X:$src2),
169 (iPTR imm)), (VINSERTF64x4rr VR512:$src1, VR256X:$src2,
170 (INSERT_get_vinsert256_imm VR512:$ins))>;
// Fixed: the two integer patterns below used vinsert128_insert even though
// they insert a 256-bit subvector and compute the immediate with
// INSERT_get_vinsert256_imm; they must use vinsert256_insert, matching the
// FP patterns above.
171 def : Pat<(vinsert256_insert:$ins (v8i64 VR512:$src1), (v4i64 VR256X:$src2),
172 (iPTR imm)), (VINSERTI64x4rr VR512:$src1, VR256X:$src2,
173 (INSERT_get_vinsert256_imm VR512:$ins))>;
174 def : Pat<(vinsert256_insert:$ins (v16i32 VR512:$src1), (v8i32 VR256X:$src2),
175 (iPTR imm)), (VINSERTI64x4rr VR512:$src1, VR256X:$src2,
176 (INSERT_get_vinsert256_imm VR512:$ins))>;
// Memory-source forms of the 256-bit subvector inserts.
178 def : Pat<(vinsert256_insert:$ins (v16f32 VR512:$src1), (loadv8f32 addr:$src2),
179 (iPTR imm)), (VINSERTF64x4rm VR512:$src1, addr:$src2,
180 (INSERT_get_vinsert256_imm VR512:$ins))>;
181 def : Pat<(vinsert256_insert:$ins (v8f64 VR512:$src1), (loadv4f64 addr:$src2),
182 (iPTR imm)), (VINSERTF64x4rm VR512:$src1, addr:$src2,
183 (INSERT_get_vinsert256_imm VR512:$ins))>;
184 def : Pat<(vinsert256_insert:$ins (v8i64 VR512:$src1), (loadv4i64 addr:$src2),
185 (iPTR imm)), (VINSERTI64x4rm VR512:$src1, addr:$src2,
186 (INSERT_get_vinsert256_imm VR512:$ins))>;
// The v16i32 case matches a v4i64 load re-typed via bc_v8i32.
187 def : Pat<(vinsert256_insert:$ins (v16i32 VR512:$src1),
188 (bc_v8i32 (loadv4i64 addr:$src2)),
189 (iPTR imm)), (VINSERTI64x4rm VR512:$src1, addr:$src2,
190 (INSERT_get_vinsert256_imm VR512:$ins))>;
192 // vinsertps - insert f32 to XMM
// Register and load-folded forms; patterns select via the X86insrtps node.
193 def VINSERTPSzrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
194 (ins VR128X:$src1, VR128X:$src2, u32u8imm:$src3),
195 "vinsertps{z}\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
196 [(set VR128X:$dst, (X86insrtps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
// NOTE(review): the continuation of the definition above (its trailing
// format modifiers, e.g. EVEX_4V) is missing from this copy of the file.
198 def VINSERTPSzrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
199 (ins VR128X:$src1, f32mem:$src2, u32u8imm:$src3),
200 "vinsertps{z}\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
201 [(set VR128X:$dst, (X86insrtps VR128X:$src1,
202 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
203 imm:$src3))]>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
205 //===----------------------------------------------------------------------===//
206 // AVX-512 VECTOR EXTRACT
// Register and store forms; pattern lists are empty here, selection
// patterns follow below.
// -- 32x4 FP form --
208 let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
210 def VEXTRACTF32x4rr : AVX512AIi8<0x19, MRMDestReg, (outs VR128X:$dst),
211 (ins VR512:$src1, i8imm:$src2),
212 "vextractf32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
213 []>, EVEX, EVEX_V512;
214 def VEXTRACTF32x4mr : AVX512AIi8<0x19, MRMDestMem, (outs),
215 (ins f128mem:$dst, VR512:$src1, i8imm:$src2),
216 "vextractf32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
217 []>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VT4>;
// -- 64x4 FP form --
220 def VEXTRACTF64x4rr : AVX512AIi8<0x1b, MRMDestReg, (outs VR256X:$dst),
221 (ins VR512:$src1, i8imm:$src2),
222 "vextractf64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
223 []>, EVEX, EVEX_V512, VEX_W;
225 def VEXTRACTF64x4mr : AVX512AIi8<0x1b, MRMDestMem, (outs),
226 (ins f256mem:$dst, VR512:$src1, i8imm:$src2),
227 "vextractf64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
228 []>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
// -- integer forms --
231 let neverHasSideEffects = 1 in {
233 def VEXTRACTI32x4rr : AVX512AIi8<0x39, MRMDestReg, (outs VR128X:$dst),
234 (ins VR512:$src1, i8imm:$src2),
235 "vextracti32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
236 []>, EVEX, EVEX_V512;
237 def VEXTRACTI32x4mr : AVX512AIi8<0x39, MRMDestMem, (outs),
238 (ins i128mem:$dst, VR512:$src1, i8imm:$src2),
239 "vextracti32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
240 []>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VT4>;
243 def VEXTRACTI64x4rr : AVX512AIi8<0x3b, MRMDestReg, (outs VR256X:$dst),
244 (ins VR512:$src1, i8imm:$src2),
245 "vextracti64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
246 []>, EVEX, EVEX_V512, VEX_W;
248 def VEXTRACTI64x4mr : AVX512AIi8<0x3b, MRMDestMem, (outs),
249 (ins i256mem:$dst, VR512:$src1, i8imm:$src2),
250 "vextracti64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
251 []>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
// Patterns: extract a 128-bit subvector from a 512-bit vector.
254 def : Pat<(vextract128_extract:$ext (v16f32 VR512:$src1), (iPTR imm)),
255 (v4f32 (VEXTRACTF32x4rr VR512:$src1,
256 (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
// Fixed: the source operand was missing its (v16i32 ...) type annotation —
// an untyped VR512 source cannot be resolved by type inference here — and
// the integer extract now uses VEXTRACTI32x4rr, consistent with the v8i64
// pattern below.
258 def : Pat<(vextract128_extract:$ext (v16i32 VR512:$src1), (iPTR imm)),
259 (v4i32 (VEXTRACTI32x4rr VR512:$src1,
260 (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
// Remaining 128-bit extract patterns, followed by the 256-bit ones.
// NOTE(review): the v8f64 case reuses the 32x4 FP extract — presumably
// because no 64x2 extract form is defined in this file; confirm.
262 def : Pat<(vextract128_extract:$ext (v8f64 VR512:$src1), (iPTR imm)),
263 (v2f64 (VEXTRACTF32x4rr VR512:$src1,
264 (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
266 def : Pat<(vextract128_extract:$ext (v8i64 VR512:$src1), (iPTR imm)),
267 (v2i64 (VEXTRACTI32x4rr VR512:$src1,
268 (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
// Patterns: extract a 256-bit subvector from a 512-bit vector.
271 def : Pat<(vextract256_extract:$ext (v16f32 VR512:$src1), (iPTR imm)),
272 (v8f32 (VEXTRACTF64x4rr VR512:$src1,
273 (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
275 def : Pat<(vextract256_extract:$ext (v16i32 VR512:$src1), (iPTR imm)),
276 (v8i32 (VEXTRACTI64x4rr VR512:$src1,
277 (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
279 def : Pat<(vextract256_extract:$ext (v8f64 VR512:$src1), (iPTR imm)),
280 (v4f64 (VEXTRACTF64x4rr VR512:$src1,
281 (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
283 def : Pat<(vextract256_extract:$ext (v8i64 VR512:$src1), (iPTR imm)),
284 (v4i64 (VEXTRACTI64x4rr VR512:$src1,
285 (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
287 // A 256-bit subvector extract from the first 512-bit vector position
288 // is a subregister copy that needs no instruction.
289 def : Pat<(v8i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
290 (v8i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm))>;
291 def : Pat<(v8f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
292 (v8f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm))>;
293 def : Pat<(v4i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
294 (v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm))>;
295 def : Pat<(v4f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
296 (v4f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm))>;
// Likewise, a 128-bit extract from position 0 is just a sub_xmm copy.
299 def : Pat<(v4i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
300 (v4i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm))>;
301 def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
302 (v2i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm))>;
303 def : Pat<(v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
304 (v2f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;
305 def : Pat<(v4f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
306 (v4f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
309 // A 128-bit subvector insert to the first 512-bit vector position
310 // is a subregister copy that needs no instruction.
// NOTE(review): each of the four 128-bit patterns below is missing its final
// line (presumably "sub_ymm)>;") in this copy of the file — lines were
// dropped during extraction; the visible bytes are kept unchanged.
311 def : Pat<(insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)),
312 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
313 (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
315 def : Pat<(insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0)),
316 (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
317 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
319 def : Pat<(insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0)),
320 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
321 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
323 def : Pat<(insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0)),
324 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
325 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
// A 256-bit subvector insert to position 0 is a single sub_ymm copy.
328 def : Pat<(insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0)),
329 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
330 def : Pat<(insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0)),
331 (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
332 def : Pat<(insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0)),
333 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
334 def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
335 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
337 // vextractps - extract 32 bits from XMM
// Register-destination and store forms; the source is re-typed to v4i32 via
// bc_v4i32 so extractelt yields an i32.
338 def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
339 (ins VR128X:$src1, u32u8imm:$src2),
340 "vextractps{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
341 [(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
// NOTE(review): the trailing format modifiers of the definition above are
// missing from this copy of the file.
344 def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
345 (ins f32mem:$dst, VR128X:$src1, u32u8imm:$src2),
346 "vextractps{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
347 [(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
// NOTE(review): the store address operand and trailing modifiers of the
// definition above are likewise missing in this copy.
350 //===---------------------------------------------------------------------===//
// AVX-512 FP broadcast: one scalar (register or memory) replicated across a
// ZMM register. Patterns are empty in the multiclass; load-form selection
// patterns follow the defms.
353 multiclass avx512_fp_broadcast<bits<8> opc, string OpcodeStr,
354 RegisterClass DestRC,
355 RegisterClass SrcRC, X86MemOperand x86memop> {
356 def rr : AVX5128I<opc, MRMSrcReg, (outs DestRC:$dst), (ins SrcRC:$src),
357 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
// NOTE(review): the rr form's pattern/format continuation line is missing
// from this copy of the file.
359 def rm : AVX5128I<opc, MRMSrcMem, (outs DestRC:$dst), (ins x86memop:$src),
360 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),[]>, EVEX;
362 let ExeDomain = SSEPackedSingle in {
363 defm VBROADCASTSSZ : avx512_fp_broadcast<0x18, "vbroadcastss{z}", VR512,
365 EVEX_V512, EVEX_CD8<32, CD8VT1>;
368 let ExeDomain = SSEPackedDouble in {
369 defm VBROADCASTSDZ : avx512_fp_broadcast<0x19, "vbroadcastsd{z}", VR512,
371 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
// Select the memory forms for broadcast-of-load.
374 def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))),
375 (VBROADCASTSSZrm addr:$src)>;
376 def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))),
377 (VBROADCASTSDZrm addr:$src)>;
// Integer broadcast from a GPR: plain form (Zrr) and zero-masked form
// (Zkrr, EVEX_KZ) that writes only lanes selected by the mask register.
379 multiclass avx512_int_broadcast_reg<bits<8> opc, string OpcodeStr,
380 RegisterClass SrcRC, RegisterClass KRC> {
381 def Zrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins SrcRC:$src),
382 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
383 []>, EVEX, EVEX_V512;
384 def Zkrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst),
385 (ins KRC:$mask, SrcRC:$src),
386 !strconcat(OpcodeStr,
387 "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
388 []>, EVEX, EVEX_V512, EVEX_KZ;
391 defm VPBROADCASTDr : avx512_int_broadcast_reg<0x7C, "vpbroadcastd", GR32, VK16WM>;
392 defm VPBROADCASTQr : avx512_int_broadcast_reg<0x7C, "vpbroadcastq", GR64, VK8WM>,
// NOTE(review): the continuation of the VPBROADCASTQr defm (trailing VEX_W
// etc.) is missing from this copy of the file.
// Zero-extend a mask to a vector by broadcasting 1 under the mask.
395 def : Pat <(v16i32 (X86vzext VK16WM:$mask)),
396 (VPBROADCASTDrZkrr VK16WM:$mask, (i32 (MOV32ri 0x1)))>;
398 def : Pat <(v8i64 (X86vzext VK8WM:$mask)),
399 (VPBROADCASTQrZkrr VK8WM:$mask, (i64 (MOV64ri 0x1)))>;
401 def : Pat<(v16i32 (X86VBroadcast (i32 GR32:$src))),
402 (VPBROADCASTDrZrr GR32:$src)>;
403 def : Pat<(v8i64 (X86VBroadcast (i64 GR64:$src))),
404 (VPBROADCASTQrZrr GR64:$src)>;
// Integer broadcast from an XMM register or memory, with plain (rr/rm) and
// zero-masked (krr/krm) variants selected by X86VBroadcast/X86VBroadcastm.
406 multiclass avx512_int_broadcast_rm<bits<8> opc, string OpcodeStr,
407 X86MemOperand x86memop, PatFrag ld_frag,
408 RegisterClass DstRC, ValueType OpVT, ValueType SrcVT,
// NOTE(review): the KRC parameter line of this multiclass header is missing
// from this copy of the file.
410 def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins VR128X:$src),
411 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
413 (OpVT (X86VBroadcast (SrcVT VR128X:$src))))]>, EVEX;
414 def krr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins KRC:$mask,
416 !strconcat(OpcodeStr,
417 "\t{$src, ${dst}{${mask}}{z}|${dst}{${mask}}{z}, $src}"),
419 (OpVT (X86VBroadcastm KRC:$mask, (SrcVT VR128X:$src))))]>,
421 def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
422 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
424 (OpVT (X86VBroadcast (ld_frag addr:$src))))]>, EVEX;
425 def krm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins KRC:$mask,
427 !strconcat(OpcodeStr,
428 "\t{$src, ${dst}{${mask}}{z}|${dst}{${mask}}{z}, $src}"),
429 [(set DstRC:$dst, (OpVT (X86VBroadcastm KRC:$mask,
430 (ld_frag addr:$src))))]>, EVEX, EVEX_KZ;
433 defm VPBROADCASTDZ : avx512_int_broadcast_rm<0x58, "vpbroadcastd", i32mem,
434 loadi32, VR512, v16i32, v4i32, VK16WM>,
435 EVEX_V512, EVEX_CD8<32, CD8VT1>;
436 defm VPBROADCASTQZ : avx512_int_broadcast_rm<0x59, "vpbroadcastq", i64mem,
437 loadi64, VR512, v8i64, v2i64, VK8WM>, EVEX_V512, VEX_W,
438 EVEX_CD8<64, CD8VT1>;
// FP broadcast from the low element of an XMM register.
440 def : Pat<(v16f32 (X86VBroadcast (v4f32 VR128X:$src))),
441 (VBROADCASTSSZrr VR128X:$src)>;
442 def : Pat<(v8f64 (X86VBroadcast (v2f64 VR128X:$src))),
443 (VBROADCASTSDZrr VR128X:$src)>;
445 // Provide fallback in case the load node that is used in the patterns above
446 // is used by additional users, which prevents the pattern selection.
447 def : Pat<(v16f32 (X86VBroadcast FR32X:$src)),
448 (VBROADCASTSSZrr (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
449 def : Pat<(v8f64 (X86VBroadcast FR64X:$src)),
450 (VBROADCASTSDZrr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
// Masked 256-bit broadcast implemented on the 512-bit unit: widen the mask
// to VK16, broadcast into a ZMM, then take the low YMM.
453 let Predicates = [HasAVX512] in {
454 def : Pat<(v8i32 (X86VBroadcastm (v8i1 VK8WM:$mask), (loadi32 addr:$src))),
// NOTE(review): the EXTRACT_SUBREG wrapper line of this pattern is missing
// from this copy of the file.
456 (v16i32 (VPBROADCASTDZkrm (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
457 addr:$src)), sub_ymm)>;
459 //===----------------------------------------------------------------------===//
460 // AVX-512 BROADCAST MASK TO VECTOR REGISTER
// Expands a mask register into a full vector (vpbroadcastmw2d/mb2q).
463 multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
464 RegisterClass DstRC, RegisterClass KRC,
465 ValueType OpVT, ValueType SrcVT> {
466 def rr : AVX512XS8I<opc, MRMDestReg, (outs DstRC:$dst), (ins KRC:$src),
467 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
471 defm VPBROADCASTMW2D : avx512_mask_broadcast<0x3A, "vpbroadcastmw2d", VR512,
472 VK16, v16i32, v16i1>, EVEX_V512;
473 defm VPBROADCASTMB2Q : avx512_mask_broadcast<0x2A, "vpbroadcastmb2q", VR512,
474 VK8, v8i64, v8i1>, EVEX_V512, VEX_W;
476 //===----------------------------------------------------------------------===//
// AVX-512 permute with immediate control (vpermq/vpermpd $imm forms).
479 // -- immediate form --
480 multiclass avx512_perm_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
481 SDNode OpNode, PatFrag mem_frag,
482 X86MemOperand x86memop, ValueType OpVT> {
483 def ri : AVX512AIi8<opc, MRMSrcReg, (outs RC:$dst),
484 (ins RC:$src1, i8imm:$src2),
485 !strconcat(OpcodeStr,
486 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
488 (OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
490 def mi : AVX512AIi8<opc, MRMSrcMem, (outs RC:$dst),
491 (ins x86memop:$src1, i8imm:$src2),
492 !strconcat(OpcodeStr,
493 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
495 (OpVT (OpNode (mem_frag addr:$src1),
496 (i8 imm:$src2))))]>, EVEX;
// Immediate-form instances (the variable-control forms are defined below
// with a separate multiclass, sharing the VPERMQZ/VPERMPDZ name prefixes).
499 defm VPERMQZ : avx512_perm_imm<0x00, "vpermq", VR512, X86VPermi, memopv8i64,
500 i512mem, v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
501 let ExeDomain = SSEPackedDouble in
502 defm VPERMPDZ : avx512_perm_imm<0x01, "vpermpd", VR512, X86VPermi, memopv8f64,
503 f512mem, v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
505 // -- VPERM - register form --
// Variable-control permutes: control vector in $src1, data in $src2
// (X86VPermv); memory data operands are bitconverted to OpVT.
506 multiclass avx512_perm<bits<8> opc, string OpcodeStr, RegisterClass RC,
507 PatFrag mem_frag, X86MemOperand x86memop, ValueType OpVT> {
509 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
510 (ins RC:$src1, RC:$src2),
511 !strconcat(OpcodeStr,
512 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
514 (OpVT (X86VPermv RC:$src1, RC:$src2)))]>, EVEX_4V;
516 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
517 (ins RC:$src1, x86memop:$src2),
518 !strconcat(OpcodeStr,
519 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
521 (OpVT (X86VPermv RC:$src1,
522 (bitconvert (mem_frag addr:$src2)))))]>, EVEX_4V;
525 defm VPERMDZ : avx512_perm<0x36, "vpermd", VR512, memopv8i64, i512mem,
526 v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
527 defm VPERMQZ : avx512_perm<0x36, "vpermq", VR512, memopv8i64, i512mem,
528 v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
529 let ExeDomain = SSEPackedSingle in
530 defm VPERMPSZ : avx512_perm<0x16, "vpermps", VR512, memopv8f64, f512mem,
531 v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
532 let ExeDomain = SSEPackedDouble in
533 defm VPERMPDZ : avx512_perm<0x16, "vpermpd", VR512, memopv8f64, f512mem,
534 v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
536 // -- VPERM2I - 3 source operands form --
// Two-source permutes (vpermi2*): $src1 is both the index vector and the
// destination, hence the "$src1 = $dst" constraint and its omission from
// the asm string.
537 multiclass avx512_perm_3src<bits<8> opc, string OpcodeStr, RegisterClass RC,
538 PatFrag mem_frag, X86MemOperand x86memop,
540 let Constraints = "$src1 = $dst" in {
541 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
542 (ins RC:$src1, RC:$src2, RC:$src3),
543 !strconcat(OpcodeStr,
544 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
546 (OpVT (X86VPermv3 RC:$src1, RC:$src2, RC:$src3)))]>,
549 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
550 (ins RC:$src1, RC:$src2, x86memop:$src3),
551 !strconcat(OpcodeStr,
552 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
554 (OpVT (X86VPermv3 RC:$src1, RC:$src2,
555 (bitconvert (mem_frag addr:$src3)))))]>, EVEX_4V;
558 defm VPERMI2D : avx512_perm_3src<0x76, "vpermi2d", VR512, memopv8i64, i512mem,
559 v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
560 defm VPERMI2Q : avx512_perm_3src<0x76, "vpermi2q", VR512, memopv8i64, i512mem,
561 v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
562 defm VPERMI2PS : avx512_perm_3src<0x77, "vpermi2ps", VR512, memopv16f32, i512mem,
563 v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
564 defm VPERMI2PD : avx512_perm_3src<0x77, "vpermi2pd", VR512, memopv8f64, i512mem,
565 v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
567 //===----------------------------------------------------------------------===//
568 // AVX-512 - BLEND using mask
// vselect-style blend: the mask register chooses per-element between the
// two vector sources. Note the rr pattern passes $src2 before $src1 to
// OpNode — the blendm instruction semantics invert the operand order
// relative to vselect.
570 multiclass avx512_blendmask<bits<8> opc, string OpcodeStr,
571 RegisterClass KRC, RegisterClass RC,
572 X86MemOperand x86memop, PatFrag mem_frag,
573 SDNode OpNode, ValueType vt> {
574 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
575 (ins KRC:$mask, RC:$src1, RC:$src2),
576 !strconcat(OpcodeStr,
577 "\t{$src2, $src1, ${dst}{${mask}}|${dst}{${mask}}, $src1, $src2}"),
578 [(set RC:$dst, (OpNode KRC:$mask, (vt RC:$src2),
579 (vt RC:$src1)))]>, EVEX_4V, EVEX_K;
581 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
582 (ins KRC:$mask, RC:$src1, x86memop:$src2),
583 !strconcat(OpcodeStr,
// NOTE(review): the rm asm string spells the mask as a separate "$mask"
// operand, unlike the rr form's "${dst}{${mask}}" syntax — verify which is
// intended. The rm pattern/format lines are also missing in this copy.
584 "\t{$src2, $src1, $mask, $dst|$dst, $mask, $src1, $src2}"),
589 let ExeDomain = SSEPackedSingle in
590 defm VBLENDMPSZ : avx512_blendmask<0x65, "vblendmps", VK16WM, VR512, f512mem,
591 memopv16f32, vselect, v16f32>,
592 EVEX_CD8<32, CD8VF>, EVEX_V512;
593 let ExeDomain = SSEPackedDouble in
594 defm VBLENDMPDZ : avx512_blendmask<0x65, "vblendmpd", VK8WM, VR512, f512mem,
595 memopv8f64, vselect, v8f64>,
596 VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
598 defm VPBLENDMDZ : avx512_blendmask<0x64, "vpblendmd", VK16WM, VR512, f512mem,
599 memopv8i64, vselect, v16i32>,
600 EVEX_CD8<32, CD8VF>, EVEX_V512;
602 defm VPBLENDMQZ : avx512_blendmask<0x64, "vpblendmq", VK8WM, VR512, f512mem,
603 memopv8i64, vselect, v8i64>, VEX_W,
604 EVEX_CD8<64, CD8VF>, EVEX_V512;
// 256-bit vselect lowered on the 512-bit blend unit: widen the mask to
// VK16, widen both sources into ZMM via SUBREG_TO_REG, blend, take sub_ymm.
607 let Predicates = [HasAVX512] in {
608 def : Pat<(v8f32 (vselect (v8i1 VK8WM:$mask), (v8f32 VR256X:$src1),
609 (v8f32 VR256X:$src2))),
// NOTE(review): the EXTRACT_SUBREG wrapper line of each pattern below is
// missing from this copy of the file.
611 (v16f32 (VBLENDMPSZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
612 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
613 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
615 def : Pat<(v8i32 (vselect (v8i1 VK8WM:$mask), (v8i32 VR256X:$src1),
616 (v8i32 VR256X:$src2))),
618 (v16i32 (VPBLENDMDZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
619 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
620 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
// AVX-512 integer compare producing a mask register (vpcmpeq*/vpcmpgt*).
623 multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, RegisterClass KRC,
624 RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
625 SDNode OpNode, ValueType vt> {
626 def rr : AVX512BI<opc, MRMSrcReg,
627 (outs KRC:$dst), (ins RC:$src1, RC:$src2),
628 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
629 [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2)))],
630 IIC_SSE_CMPP_RR>, EVEX_4V;
631 def rm : AVX512BI<opc, MRMSrcMem,
632 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2),
633 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
634 [(set KRC:$dst, (OpNode (vt RC:$src1),
635 (bitconvert (memop_frag addr:$src2))))],
636 IIC_SSE_CMPP_RM>, EVEX_4V;
639 defm VPCMPEQDZ : avx512_icmp_packed<0x76, "vpcmpeqd", VK16, VR512, i512mem,
640 memopv8i64, X86pcmpeqm, v16i32>, EVEX_V512;
641 defm VPCMPEQQZ : avx512_icmp_packed<0x29, "vpcmpeqq", VK8, VR512, i512mem,
642 memopv8i64, X86pcmpeqm, v8i64>, T8, EVEX_V512, VEX_W;
644 defm VPCMPGTDZ : avx512_icmp_packed<0x66, "vpcmpgtd", VK16, VR512, i512mem,
645 memopv8i64, X86pcmpgtm, v16i32>, EVEX_V512;
646 defm VPCMPGTQZ : avx512_icmp_packed<0x37, "vpcmpgtq", VK8, VR512, i512mem,
647 memopv8i64, X86pcmpgtm, v8i64>, T8, EVEX_V512, VEX_W;
// 256-bit compares implemented by widening to ZMM and narrowing the result
// mask to VK8.
649 def : Pat<(v8i1 (X86pcmpgtm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
650 (COPY_TO_REGCLASS (VPCMPGTDZrr
651 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
652 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
654 def : Pat<(v8i1 (X86pcmpeqm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
655 (COPY_TO_REGCLASS (VPCMPEQDZrr
656 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
657 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
// AVX-512 integer compare with a condition code (vpcmp{cc}d/q and unsigned
// variants). The *_alt defs accept a raw immediate instead of a named cc.
660 multiclass avx512_icmp_cc<bits<8> opc, RegisterClass KRC,
661 RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
662 SDNode OpNode, ValueType vt, Operand CC, string asm,
664 def rri : AVX512AIi8<opc, MRMSrcReg,
665 (outs KRC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
666 [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2), imm:$cc))],
667 IIC_SSE_CMPP_RR>, EVEX_4V;
668 def rmi : AVX512AIi8<opc, MRMSrcMem,
669 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
670 [(set KRC:$dst, (OpNode (vt RC:$src1),
671 (bitconvert (memop_frag addr:$src2)), imm:$cc))],
672 IIC_SSE_CMPP_RM>, EVEX_4V;
673 // Accept explicit immediate argument form instead of comparison code.
// NOTE(review): the *_alt forms declare (outs RC:$dst) while the pattern
// forms above write KRC:$dst — this looks inconsistent (the instruction's
// destination is a mask register); verify the intended register class.
674 let neverHasSideEffects = 1 in {
675 def rri_alt : AVX512AIi8<opc, MRMSrcReg,
676 (outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
677 asm_alt, [], IIC_SSE_CMPP_RR>, EVEX_4V;
678 def rmi_alt : AVX512AIi8<opc, MRMSrcMem,
679 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
680 asm_alt, [], IIC_SSE_CMPP_RM>, EVEX_4V;
684 defm VPCMPDZ : avx512_icmp_cc<0x1F, VK16, VR512, i512mem, memopv8i64,
685 X86cmpm, v16i32, AVXCC,
686 "vpcmp${cc}d\t{$src2, $src1, $dst|$dst, $src1, $src2}",
687 "vpcmpd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
688 EVEX_V512, EVEX_CD8<32, CD8VF>;
689 defm VPCMPUDZ : avx512_icmp_cc<0x1E, VK16, VR512, i512mem, memopv8i64,
690 X86cmpmu, v16i32, AVXCC,
691 "vpcmp${cc}ud\t{$src2, $src1, $dst|$dst, $src1, $src2}",
692 "vpcmpud\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
693 EVEX_V512, EVEX_CD8<32, CD8VF>;
695 defm VPCMPQZ : avx512_icmp_cc<0x1F, VK8, VR512, i512mem, memopv8i64,
696 X86cmpm, v8i64, AVXCC,
697 "vpcmp${cc}q\t{$src2, $src1, $dst|$dst, $src1, $src2}",
698 "vpcmpq\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
699 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
700 defm VPCMPUQZ : avx512_icmp_cc<0x1E, VK8, VR512, i512mem, memopv8i64,
701 X86cmpmu, v8i64, AVXCC,
702 "vpcmp${cc}uq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
703 "vpcmpuq\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
704 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
706 // avx512_cmp_packed - AVX-512 packed FP compare instructions (vcmpps/vcmppd):
// compare two packed vectors under a condition code, writing a mask (KRC).
707 multiclass avx512_cmp_packed<RegisterClass KRC, RegisterClass RC,
708 X86MemOperand x86memop, Operand CC,
709 SDNode OpNode, ValueType vt, string asm,
710 string asm_alt, Domain d> {
711 def rri : AVX512PIi8<0xC2, MRMSrcReg,
712 (outs KRC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
713 [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2), imm:$cc))], d>;
// NOTE(review): the line opening this pattern ("[(set KRC:$dst," — original
// line 716) is not visible in this listing; the def below is incomplete here.
714 def rmi : AVX512PIi8<0xC2, MRMSrcMem,
715 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
717 (OpNode (vt RC:$src1), (memop addr:$src2), imm:$cc))], d>;
719 // Accept explicit immediate argument form instead of comparison code.
// NOTE(review): the alt forms use plain PIi8 while rri/rmi use AVX512PIi8 —
// verify this asymmetry is intentional.
720 let neverHasSideEffects = 1 in {
721 def rri_alt : PIi8<0xC2, MRMSrcReg,
722 (outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
723 asm_alt, [], IIC_SSE_CMPP_RR, d>;
724 def rmi_alt : PIi8<0xC2, MRMSrcMem,
725 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
726 asm_alt, [], IIC_SSE_CMPP_RM, d>;
// 512-bit packed-float compares: single precision → 16-bit mask (VK16),
// double precision → 8-bit mask (VK8).
730 defm VCMPPSZ : avx512_cmp_packed<VK16, VR512, f512mem, AVXCC, X86cmpm, v16f32,
731 "vcmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
732 "vcmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
733 SSEPackedSingle>, TB, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VF>;
// NOTE(review): the trailing modifier line for VCMPPDZ (original line 738,
// presumably the EVEX_CD8<64, CD8VF> continuation) is not visible here.
734 defm VCMPPDZ : avx512_cmp_packed<VK8, VR512, f512mem, AVXCC, X86cmpm, v8f64,
735 "vcmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
736 "vcmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
737 SSEPackedDouble>, TB, OpSize, EVEX_4V, VEX_W, EVEX_V512,
// Lower 256-bit condition-code compares by widening YMM operands to ZMM and
// using the 512-bit compare, then narrowing the mask result to VK8.
// NOTE(review): each pattern's final line (original lines 744/749/754,
// carrying the imm:$cc operand and the closing "VK8)>;") is not visible in
// this listing — these defs are incomplete as shown.
740 def : Pat<(v8i1 (X86cmpm (v8f32 VR256X:$src1), (v8f32 VR256X:$src2), imm:$cc)),
741 (COPY_TO_REGCLASS (VCMPPSZrri
742 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
743 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
745 def : Pat<(v8i1 (X86cmpm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
746 (COPY_TO_REGCLASS (VPCMPDZrri
747 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
748 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
750 def : Pat<(v8i1 (X86cmpmu (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
751 (COPY_TO_REGCLASS (VPCMPUDZrri
752 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
753 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
756 // Mask register copy, including
757 // - copy between mask registers
758 // - load/store mask registers
759 // - copy from GPR to mask register and vice versa
// Mask-register moves: kk (mask←mask), km (mask←memory, with a load
// pattern), mk (memory←mask, pattern-less).
761 multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
762 string OpcodeStr, RegisterClass KRC,
763 ValueType vt, X86MemOperand x86memop> {
764 let neverHasSideEffects = 1 in {
765 def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
766 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
767 def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
769 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
770 [(set KRC:$dst, (vt (load addr:$src)))]>;
772 def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
773 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
// Mask↔GPR moves: kr (mask←GPR) and rk (GPR←mask); both pattern-less.
// NOTE(review): a parameter line (original line 778, presumably
// "string OpcodeStr,") is not visible in this listing although OpcodeStr
// is referenced below.
777 multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
779 RegisterClass KRC, RegisterClass GRC> {
780 let neverHasSideEffects = 1 in {
781 def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src),
782 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
783 def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src),
784 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
// KMOVW instantiations (16-bit mask moves; GPR form uses GR32).
// NOTE(review): the trailing modifier lines of both defms (original lines
// 790 and 792-793) are not visible in this listing.
788 let Predicates = [HasAVX512] in {
789 defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem>,
791 defm KMOVW : avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
795 let Predicates = [HasAVX512] in {
796 // GR16 from/to 16-bit mask
// i16↔v16i1 bitcasts go through KMOVW with sub_16bit subregister fixups,
// since KMOVW's GPR form operates on GR32.
797 def : Pat<(v16i1 (bitconvert (i16 GR16:$src))),
798 (KMOVWkr (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit))>;
799 def : Pat<(i16 (bitconvert (v16i1 VK16:$src))),
800 (EXTRACT_SUBREG (KMOVWrk VK16:$src), sub_16bit)>;
802 // Store kreg in memory
803 def : Pat<(store (v16i1 VK16:$src), addr:$dst),
804 (KMOVWmk addr:$dst, VK16:$src)>;
// v8i1 stores are promoted to VK16 first — there is no 8-bit kmov here.
806 def : Pat<(store (v8i1 VK8:$src), addr:$dst),
807 (KMOVWmk addr:$dst, (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16)))>;
809 // With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
// NOTE(review): the destination-pattern lines of both defs (original lines
// 813/815 and 817/819) are not visible here, so each pattern below is
// incomplete as shown — presumably COPY_TO_REGCLASS / EXTRACT_SUBREG
// wrappers around the KMOVW forms.
810 let Predicates = [HasAVX512] in {
811 // GR from/to 8-bit mask without native support
812 def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
814 (KMOVWkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
816 def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
818 (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
822 // Mask unary operation
// Single reg/reg form mapping an SDPatternOperator onto a mask register.
824 multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr,
825 RegisterClass KRC, SDPatternOperator OpNode> {
826 let Predicates = [HasAVX512] in
827 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
828 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
829 [(set KRC:$dst, (OpNode KRC:$src))]>;
// Word-width (VK16) convenience wrapper; appends "w" to the mnemonic.
// NOTE(review): its trailing encoding-modifier line (original line 835)
// is not visible in this listing.
832 multiclass avx512_mask_unop_w<bits<8> opc, string OpcodeStr,
833 SDPatternOperator OpNode> {
834 defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
// KNOTW, plus patterns selecting it for xor-with-all-ones (i.e. bitwise
// not) on both VK16 and VK8; the VK8 form round-trips through VK16.
838 defm KNOT : avx512_mask_unop_w<0x44, "knot", not>;
840 def : Pat<(xor VK16:$src1, (v16i1 immAllOnesV)), (KNOTWrr VK16:$src1)>;
841 def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)),
842 (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$src1, VK16)), VK8)>;
844 // With AVX-512, 8-bit mask is promoted to 16-bit mask.
// NOTE(review): this pattern's intermediate line (original line 846,
// presumably "(COPY_TO_REGCLASS") is not visible in this listing.
845 def : Pat<(not VK8:$src),
847 (KNOTWrr (COPY_TO_REGCLASS VK8:$src, VK16)), VK8)>;
849 // Mask binary operation
850 // - KADD, KAND, KANDN, KOR, KXNOR, KXOR
851 multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr,
852 RegisterClass KRC, SDPatternOperator OpNode> {
853 let Predicates = [HasAVX512] in
854 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
855 !strconcat(OpcodeStr,
856 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
857 [(set KRC:$dst, (OpNode KRC:$src1, KRC:$src2))]>;
// Word-width (VK16) wrapper; appends "w" to the mnemonic.
860 multiclass avx512_mask_binop_w<bits<8> opc, string OpcodeStr,
861 SDPatternOperator OpNode> {
862 defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
// DAG fragments for and-not and xnor, used by KANDN / KXNOR patterns below.
866 def andn : PatFrag<(ops node:$i0, node:$i1), (and (not node:$i0), node:$i1)>;
867 def xnor : PatFrag<(ops node:$i0, node:$i1), (not (xor node:$i0, node:$i1))>;
// All mask binops are commutative except KANDN (andn(a,b) = ~a & b).
869 let isCommutable = 1 in {
870 defm KADD : avx512_mask_binop_w<0x4a, "kadd", add>;
871 defm KAND : avx512_mask_binop_w<0x41, "kand", and>;
872 let isCommutable = 0 in
873 defm KANDN : avx512_mask_binop_w<0x42, "kandn", andn>;
874 defm KOR : avx512_mask_binop_w<0x45, "kor", or>;
875 defm KXNOR : avx512_mask_binop_w<0x46, "kxnor", xnor>;
876 defm KXOR : avx512_mask_binop_w<0x47, "kxor", xor>;
// Map the x86 mask-binop intrinsics (int_x86_<name>_v16i1) onto the
// corresponding "<INST>Wrr" instruction, looked up by name concatenation.
879 multiclass avx512_mask_binop_int<string IntName, string InstName> {
880 let Predicates = [HasAVX512] in
881 def : Pat<(!cast<Intrinsic>("int_x86_"##IntName##"_v16i1")
882 VK16:$src1, VK16:$src2),
883 (!cast<Instruction>(InstName##"Wrr") VK16:$src1, VK16:$src2)>;
886 defm : avx512_mask_binop_int<"kadd", "KADD">;
887 defm : avx512_mask_binop_int<"kand", "KAND">;
888 defm : avx512_mask_binop_int<"kandn", "KANDN">;
889 defm : avx512_mask_binop_int<"kor", "KOR">;
890 defm : avx512_mask_binop_int<"kxnor", "KXNOR">;
891 defm : avx512_mask_binop_int<"kxor", "KXOR">;
892 // With AVX-512, 8-bit mask is promoted to 16-bit mask.
// Select the 16-bit mask instruction for VK8 operands by copying operands
// into VK16 and the result back to VK8.
// NOTE(review): the pattern's intermediate line (original line 896,
// presumably "(COPY_TO_REGCLASS") is not visible in this listing.
893 multiclass avx512_binop_pat<SDPatternOperator OpNode, Instruction Inst> {
894 let Predicates = [HasAVX512] in
895 def : Pat<(OpNode VK8:$src1, VK8:$src2),
897 (Inst (COPY_TO_REGCLASS VK8:$src1, VK16),
898 (COPY_TO_REGCLASS VK8:$src2, VK16)), VK8)>;
901 defm : avx512_binop_pat<and, KANDWrr>;
902 defm : avx512_binop_pat<andn, KANDNWrr>;
903 defm : avx512_binop_pat<or, KORWrr>;
904 defm : avx512_binop_pat<xnor, KXNORWrr>;
905 defm : avx512_binop_pat<xor, KXORWrr>;
// Mask unpack: combines two narrower masks (KRC2) into a wider one (KRC1);
// pattern-less — selection goes through the intrinsic patterns below.
908 multiclass avx512_mask_unpck<bits<8> opc, string OpcodeStr,
909 RegisterClass KRC1, RegisterClass KRC2> {
910 let Predicates = [HasAVX512] in
911 def rr : I<opc, MRMSrcReg, (outs KRC1:$dst), (ins KRC2:$src1, KRC2:$src2),
912 !strconcat(OpcodeStr,
913 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
// byte→word unpack: two VK8 masks into one VK16 (mnemonic suffix "bw").
916 multiclass avx512_mask_unpck_bw<bits<8> opc, string OpcodeStr> {
917 defm BW : avx512_mask_unpck<opc, !strconcat(OpcodeStr, "bw"), VK16, VK8>,
918 VEX_4V, VEX_L, OpSize, TB;
921 defm KUNPCK : avx512_mask_unpck_bw<0x4b, "kunpck">;
// Map the kunpck intrinsic onto KUNPCKBWrr by name concatenation.
923 multiclass avx512_mask_unpck_int<string IntName, string InstName> {
924 let Predicates = [HasAVX512] in
925 def : Pat<(!cast<Intrinsic>("int_x86_"##IntName##"_v16i1")
926 VK8:$src1, VK8:$src2),
927 (!cast<Instruction>(InstName##"BWrr") VK8:$src1, VK8:$src2)>;
930 defm : avx512_mask_unpck_int<"kunpck", "KUNPCK">;
// Mask test: compares/tests two mask registers and sets EFLAGS only
// (no mask destination). NOTE(review): the parameter line naming OpNode
// (original line 933) is not visible in this listing.
932 multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
934 let Predicates = [HasAVX512], Defs = [EFLAGS] in
935 def rr : I<opc, MRMSrcReg, (outs), (ins KRC:$src1, KRC:$src2),
936 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
937 [(set EFLAGS, (OpNode KRC:$src1, KRC:$src2))]>;
// Word-width (VK16) wrapper for the test ops.
940 multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode> {
941 defm W : avx512_mask_testop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
945 defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>;
946 defm KTEST : avx512_mask_testop_w<0x99, "ktest", X86ktest>;
// Mask shift by an 8-bit immediate. NOTE(review): the line naming the
// OpNode parameter (original line 950) is not visible in this listing.
949 multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
951 let Predicates = [HasAVX512] in
952 def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, i8imm:$imm),
953 !strconcat(OpcodeStr,
954 "\t{$imm, $src, $dst|$dst, $src, $imm}"),
955 [(set KRC:$dst, (OpNode KRC:$src, (i8 imm:$imm)))]>;
// Word-width wrapper. NOTE(review): only opc1 is used in the visible
// lines; opc2's use (if any) would be in the elided lines 962-963.
958 multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
960 defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
961 VEX, OpSize, TA, VEX_W;
964 defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", shl>;
965 defm KSHIFTR : avx512_mask_shiftop_w<0x30, 0x31, "kshiftr", srl>;
967 // Mask setting all 0s or 1s
// Pseudo (rematerializable, as-cheap-as-a-move) that materializes a
// constant all-zeros or all-ones mask of the given type.
968 multiclass avx512_mask_setop<RegisterClass KRC, ValueType VT, PatFrag Val> {
969 let Predicates = [HasAVX512] in
970 let isReMaterializable = 1, isAsCheapAsAMove = 1, isPseudo = 1 in
971 def #NAME# : I<0, Pseudo, (outs KRC:$dst), (ins), "",
972 [(set KRC:$dst, (VT Val))]>;
// Instantiate both byte (VK8/v8i1) and word (VK16/v16i1) widths.
975 multiclass avx512_mask_setop_w<PatFrag Val> {
976 defm B : avx512_mask_setop<VK8, v8i1, Val>;
977 defm W : avx512_mask_setop<VK16, v16i1, Val>;
980 defm KSET0 : avx512_mask_setop_w<immAllZerosV>;
981 defm KSET1 : avx512_mask_setop_w<immAllOnesV>;
983 // With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
// Constant v8i1 masks reuse the W-width pseudos with a regclass copy.
984 let Predicates = [HasAVX512] in {
985 def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
986 def : Pat<(v8i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK8)>;
// Low-half extraction and zero-offset insertion are pure regclass copies.
988 def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
989 (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;
991 def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
992 (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>;
// High-half extraction shifts the mask right by 8 first (KSHIFTRWri).
994 def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
995 (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
997 //===----------------------------------------------------------------------===//
998 // AVX-512 - Aligned and unaligned load and store
// Packed FP move/load with optional mask-register merging:
// rr (reg move), rm (load, foldable), and masked merge forms rrk/rmk that
// tie $src1 to $dst so unselected lanes keep the old destination value.
1001 multiclass avx512_mov_packed<bits<8> opc, RegisterClass RC, RegisterClass KRC,
1002 X86MemOperand x86memop, PatFrag ld_frag,
1003 string asm, Domain d> {
1004 let neverHasSideEffects = 1 in
1005 def rr : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
1006 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>,
1008 let canFoldAsLoad = 1 in
1009 def rm : AVX512PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
1010 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1011 [(set RC:$dst, (ld_frag addr:$src))], d>, EVEX;
// NOTE(review): the !strconcat opener of each masked asm string (original
// lines 1015 and 1020) is not visible in this listing.
1012 let Constraints = "$src1 = $dst" in {
1013 def rrk : AVX512PI<opc, MRMSrcReg, (outs RC:$dst),
1014 (ins RC:$src1, KRC:$mask, RC:$src2),
1016 "\t{$src2, ${dst}{${mask}}|${dst}{${mask}}, $src2}"), [], d>,
1018 def rmk : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
1019 (ins RC:$src1, KRC:$mask, x86memop:$src2),
1021 "\t{$src2, ${dst}{${mask}}|${dst}{${mask}}, $src2}"),
1022 [], d>, EVEX, EVEX_K;
// 512-bit FP moves: aligned (vmovaps/vmovapd, opcode 0x28) and unaligned
// (vmovups/vmovupd, opcode 0x10); PS forms mask with VK16WM, PD with VK8WM.
1026 defm VMOVAPSZ : avx512_mov_packed<0x28, VR512, VK16WM, f512mem, alignedloadv16f32,
1027 "vmovaps", SSEPackedSingle>,
1028 EVEX_V512, EVEX_CD8<32, CD8VF>;
1029 defm VMOVAPDZ : avx512_mov_packed<0x28, VR512, VK8WM, f512mem, alignedloadv8f64,
1030 "vmovapd", SSEPackedDouble>,
1031 OpSize, EVEX_V512, VEX_W,
1032 EVEX_CD8<64, CD8VF>;
1033 defm VMOVUPSZ : avx512_mov_packed<0x10, VR512, VK16WM, f512mem, loadv16f32,
1034 "vmovups", SSEPackedSingle>,
1035 TB, EVEX_V512, EVEX_CD8<32, CD8VF>;
1036 defm VMOVUPDZ : avx512_mov_packed<0x10, VR512, VK8WM, f512mem, loadv8f64,
1037 "vmovupd", SSEPackedDouble>,
1038 OpSize, EVEX_V512, VEX_W,
1039 EVEX_CD8<64, CD8VF>;
// 512-bit FP store forms (MRMDestMem): aligned stores use
// alignedstore512, unaligned use plain store.
1040 def VMOVAPSZmr : AVX512PI<0x29, MRMDestMem, (outs), (ins f512mem:$dst, VR512:$src),
1041 "vmovaps\t{$src, $dst|$dst, $src}",
1042 [(alignedstore512 (v16f32 VR512:$src), addr:$dst)],
1043 SSEPackedSingle>, EVEX, EVEX_V512, TB,
1044 EVEX_CD8<32, CD8VF>;
1045 def VMOVAPDZmr : AVX512PI<0x29, MRMDestMem, (outs), (ins f512mem:$dst, VR512:$src),
1046 "vmovapd\t{$src, $dst|$dst, $src}",
1047 [(alignedstore512 (v8f64 VR512:$src), addr:$dst)],
1048 SSEPackedDouble>, EVEX, EVEX_V512,
1049 OpSize, TB, VEX_W, EVEX_CD8<64, CD8VF>;
1050 def VMOVUPSZmr : AVX512PI<0x11, MRMDestMem, (outs), (ins f512mem:$dst, VR512:$src),
1051 "vmovups\t{$src, $dst|$dst, $src}",
1052 [(store (v16f32 VR512:$src), addr:$dst)],
1053 SSEPackedSingle>, EVEX, EVEX_V512, TB,
1054 EVEX_CD8<32, CD8VF>;
1055 def VMOVUPDZmr : AVX512PI<0x11, MRMDestMem, (outs), (ins f512mem:$dst, VR512:$src),
1056 "vmovupd\t{$src, $dst|$dst, $src}",
1057 [(store (v8f64 VR512:$src), addr:$dst)],
1058 SSEPackedDouble>, EVEX, EVEX_V512,
1059 OpSize, TB, VEX_W, EVEX_CD8<64, CD8VF>;
1061 // Use vmovaps/vmovups for AVX-512 integer load/store.
1062 // 512-bit load/store
1063 def : Pat<(alignedloadv8i64 addr:$src),
1064 (VMOVAPSZrm addr:$src)>;
1065 def : Pat<(loadv8i64 addr:$src),
1066 (VMOVUPSZrm addr:$src)>;
1068 def : Pat<(alignedstore512 (v8i64 VR512:$src), addr:$dst),
1069 (VMOVAPSZmr addr:$dst, VR512:$src)>;
1070 def : Pat<(alignedstore512 (v16i32 VR512:$src), addr:$dst),
1071 (VMOVAPSZmr addr:$dst, VR512:$src)>;
// NOTE(review): this v8i64 store selects VMOVUPDZmr, not VMOVUPSZmr as the
// comment above implies. A full-register unaligned store is functionally
// equivalent either way, but confirm the inconsistency is intentional.
1073 def : Pat<(store (v8i64 VR512:$src), addr:$dst),
1074 (VMOVUPDZmr addr:$dst, VR512:$src)>;
1075 def : Pat<(store (v16i32 VR512:$src), addr:$dst),
1076 (VMOVUPSZmr addr:$dst, VR512:$src)>;
// Aligned integer moves vmovdqa32/vmovdqa64: reg/reg, store (mayStore),
// and load (mayLoad) forms, all pattern-less.
// NOTE(review): the (ins ...) line of each rr/rm def (original lines
// 1080, 1084, 1099, 1103) is not visible in this listing.
1078 let neverHasSideEffects = 1 in {
1079 def VMOVDQA32rr : AVX512BI<0x6F, MRMSrcReg, (outs VR512:$dst),
1081 "vmovdqa32\t{$src, $dst|$dst, $src}", []>,
1083 def VMOVDQA64rr : AVX512BI<0x6F, MRMSrcReg, (outs VR512:$dst),
1085 "vmovdqa64\t{$src, $dst|$dst, $src}", []>,
1086 EVEX, EVEX_V512, VEX_W;
1087 let mayStore = 1 in {
1088 def VMOVDQA32mr : AVX512BI<0x7F, MRMDestMem, (outs),
1089 (ins i512mem:$dst, VR512:$src),
1090 "vmovdqa32\t{$src, $dst|$dst, $src}", []>,
1091 EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
1092 def VMOVDQA64mr : AVX512BI<0x7F, MRMDestMem, (outs),
1093 (ins i512mem:$dst, VR512:$src),
1094 "vmovdqa64\t{$src, $dst|$dst, $src}", []>,
1095 EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
1097 let mayLoad = 1 in {
1098 def VMOVDQA32rm : AVX512BI<0x6F, MRMSrcMem, (outs VR512:$dst),
1100 "vmovdqa32\t{$src, $dst|$dst, $src}", []>,
1101 EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
1102 def VMOVDQA64rm : AVX512BI<0x6F, MRMSrcMem, (outs VR512:$dst),
1104 "vmovdqa64\t{$src, $dst|$dst, $src}", []>,
1105 EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// Unaligned integer move (vmovdqu32/64): rr, rm (load bitconverted through
// bc_frag), and masked merge forms rrk/rmk ($src1 tied to $dst).
1109 multiclass avx512_mov_int<bits<8> opc, string asm, RegisterClass RC,
1110 RegisterClass KRC, PatFrag bc_frag,
1111 PatFrag ld_frag, X86MemOperand x86memop> {
1112 let neverHasSideEffects = 1 in
1113 def rr : AVX512XSI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
1114 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), []>,
1116 let canFoldAsLoad = 1 in
1117 def rm : AVX512XSI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
1118 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1119 [(set RC:$dst, (bc_frag (ld_frag addr:$src)))]>,
// NOTE(review): the !strconcat opener of each masked asm string (original
// lines 1124 and 1129) is not visible in this listing.
1121 let Constraints = "$src1 = $dst" in {
1122 def rrk : AVX512XSI<opc, MRMSrcReg, (outs RC:$dst),
1123 (ins RC:$src1, KRC:$mask, RC:$src2),
1125 "\t{$src2, ${dst}{${mask}}|${dst}{${mask}}, $src2}"), []>,
1127 def rmk : AVX512XSI<opc, MRMSrcMem, (outs RC:$dst),
1128 (ins RC:$src1, KRC:$mask, x86memop:$src2),
1130 "\t{$src2, ${dst}{${mask}}|${dst}{${mask}}, $src2}"),
// Both widths load through memopv8i64; bc_frag recovers the element type.
1135 defm VMOVDQU32 : avx512_mov_int<0x6F, "vmovdqu32", VR512, VK16WM, bc_v16i32,
1136 memopv8i64, i512mem>, EVEX_V512, EVEX_CD8<32, CD8VF>;
1137 defm VMOVDQU64 : avx512_mov_int<0x6F, "vmovdqu64", VR512, VK8WM, bc_v8i64,
1138 memopv8i64, i512mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// Select masked moves for vselect: note the operand swap — the rrk forms
// merge $src2 into the tied destination, so vselect(mask, src1, src2)
// becomes <mov>rrk(src2, mask, src1). The closing brace of this
// AddedComplexity block lies beyond the visible lines.
1140 let AddedComplexity = 20 in {
1141 def : Pat<(v16f32 (vselect VK16WM:$mask, (v16f32 VR512:$src1),
1142 (v16f32 VR512:$src2))),
1143 (VMOVUPSZrrk VR512:$src2, VK16WM:$mask, VR512:$src1)>;
1144 def : Pat<(v8f64 (vselect VK8WM:$mask, (v8f64 VR512:$src1),
1145 (v8f64 VR512:$src2))),
1146 (VMOVUPDZrrk VR512:$src2, VK8WM:$mask, VR512:$src1)>;
1147 def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 VR512:$src1),
1148 (v16i32 VR512:$src2))),
1149 (VMOVDQU32rrk VR512:$src2, VK16WM:$mask, VR512:$src1)>;
1150 def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src1),
1151 (v8i64 VR512:$src2))),
1152 (VMOVDQU64rrk VR512:$src2, VK8WM:$mask, VR512:$src1)>;