1 // Bitcasts between 512-bit vector types. Return the original type since
2 // no instruction is needed for the conversion
3 let Predicates = [HasAVX512] in {
// Every 512-bit vector type can be reinterpreted as any other 512-bit type
// with no instruction; these patterns simply retag the VR512 register.
4 def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
5 def : Pat<(v8f64 (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
6 def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
7 def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
8 def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))), (v16f32 VR512:$src)>;
9 def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))), (v16f32 VR512:$src)>;
10 def : Pat<(v8i64 (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
11 def : Pat<(v8i64 (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
12 def : Pat<(v8i64 (bitconvert (v8f64 VR512:$src))), (v8i64 VR512:$src)>;
13 def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
14 def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))), (v16i32 VR512:$src)>;
15 def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))), (v16i32 VR512:$src)>;
// Removed: a trailing v8f64 <- (bitconvert v8i64) pattern that exactly
// duplicated the one at the top of this group (inner line 6); redundant
// Pat defs add nothing and obscure the (otherwise complete) 4x3 matrix.
// Bitcasts between 128-bit vector types (VR128X). As with the 512-bit
// variants above, no instruction is emitted; the register is retagged.
18 def : Pat<(v2i64 (bitconvert (v4i32 VR128X:$src))), (v2i64 VR128X:$src)>;
19 def : Pat<(v2i64 (bitconvert (v8i16 VR128X:$src))), (v2i64 VR128X:$src)>;
20 def : Pat<(v2i64 (bitconvert (v16i8 VR128X:$src))), (v2i64 VR128X:$src)>;
21 def : Pat<(v2i64 (bitconvert (v2f64 VR128X:$src))), (v2i64 VR128X:$src)>;
22 def : Pat<(v2i64 (bitconvert (v4f32 VR128X:$src))), (v2i64 VR128X:$src)>;
23 def : Pat<(v4i32 (bitconvert (v2i64 VR128X:$src))), (v4i32 VR128X:$src)>;
24 def : Pat<(v4i32 (bitconvert (v8i16 VR128X:$src))), (v4i32 VR128X:$src)>;
25 def : Pat<(v4i32 (bitconvert (v16i8 VR128X:$src))), (v4i32 VR128X:$src)>;
26 def : Pat<(v4i32 (bitconvert (v2f64 VR128X:$src))), (v4i32 VR128X:$src)>;
27 def : Pat<(v4i32 (bitconvert (v4f32 VR128X:$src))), (v4i32 VR128X:$src)>;
28 def : Pat<(v8i16 (bitconvert (v2i64 VR128X:$src))), (v8i16 VR128X:$src)>;
29 def : Pat<(v8i16 (bitconvert (v4i32 VR128X:$src))), (v8i16 VR128X:$src)>;
30 def : Pat<(v8i16 (bitconvert (v16i8 VR128X:$src))), (v8i16 VR128X:$src)>;
31 def : Pat<(v8i16 (bitconvert (v2f64 VR128X:$src))), (v8i16 VR128X:$src)>;
32 def : Pat<(v8i16 (bitconvert (v4f32 VR128X:$src))), (v8i16 VR128X:$src)>;
33 def : Pat<(v16i8 (bitconvert (v2i64 VR128X:$src))), (v16i8 VR128X:$src)>;
34 def : Pat<(v16i8 (bitconvert (v4i32 VR128X:$src))), (v16i8 VR128X:$src)>;
35 def : Pat<(v16i8 (bitconvert (v8i16 VR128X:$src))), (v16i8 VR128X:$src)>;
36 def : Pat<(v16i8 (bitconvert (v2f64 VR128X:$src))), (v16i8 VR128X:$src)>;
37 def : Pat<(v16i8 (bitconvert (v4f32 VR128X:$src))), (v16i8 VR128X:$src)>;
38 def : Pat<(v4f32 (bitconvert (v2i64 VR128X:$src))), (v4f32 VR128X:$src)>;
39 def : Pat<(v4f32 (bitconvert (v4i32 VR128X:$src))), (v4f32 VR128X:$src)>;
40 def : Pat<(v4f32 (bitconvert (v8i16 VR128X:$src))), (v4f32 VR128X:$src)>;
41 def : Pat<(v4f32 (bitconvert (v16i8 VR128X:$src))), (v4f32 VR128X:$src)>;
42 def : Pat<(v4f32 (bitconvert (v2f64 VR128X:$src))), (v4f32 VR128X:$src)>;
43 def : Pat<(v2f64 (bitconvert (v2i64 VR128X:$src))), (v2f64 VR128X:$src)>;
44 def : Pat<(v2f64 (bitconvert (v4i32 VR128X:$src))), (v2f64 VR128X:$src)>;
45 def : Pat<(v2f64 (bitconvert (v8i16 VR128X:$src))), (v2f64 VR128X:$src)>;
46 def : Pat<(v2f64 (bitconvert (v16i8 VR128X:$src))), (v2f64 VR128X:$src)>;
47 def : Pat<(v2f64 (bitconvert (v4f32 VR128X:$src))), (v2f64 VR128X:$src)>;
49 // Bitcasts between 256-bit vector types. Return the original type since
50 // no instruction is needed for the conversion
// (VR256X is the 256-bit register class used with the EVEX-encoded
// instructions in this file — presumably the extended YMM set; confirm
// against the register-class definition.)
51 def : Pat<(v4f64 (bitconvert (v8f32 VR256X:$src))), (v4f64 VR256X:$src)>;
52 def : Pat<(v4f64 (bitconvert (v8i32 VR256X:$src))), (v4f64 VR256X:$src)>;
53 def : Pat<(v4f64 (bitconvert (v4i64 VR256X:$src))), (v4f64 VR256X:$src)>;
54 def : Pat<(v4f64 (bitconvert (v16i16 VR256X:$src))), (v4f64 VR256X:$src)>;
55 def : Pat<(v4f64 (bitconvert (v32i8 VR256X:$src))), (v4f64 VR256X:$src)>;
56 def : Pat<(v8f32 (bitconvert (v8i32 VR256X:$src))), (v8f32 VR256X:$src)>;
57 def : Pat<(v8f32 (bitconvert (v4i64 VR256X:$src))), (v8f32 VR256X:$src)>;
58 def : Pat<(v8f32 (bitconvert (v4f64 VR256X:$src))), (v8f32 VR256X:$src)>;
59 def : Pat<(v8f32 (bitconvert (v32i8 VR256X:$src))), (v8f32 VR256X:$src)>;
60 def : Pat<(v8f32 (bitconvert (v16i16 VR256X:$src))), (v8f32 VR256X:$src)>;
61 def : Pat<(v4i64 (bitconvert (v8f32 VR256X:$src))), (v4i64 VR256X:$src)>;
62 def : Pat<(v4i64 (bitconvert (v8i32 VR256X:$src))), (v4i64 VR256X:$src)>;
63 def : Pat<(v4i64 (bitconvert (v4f64 VR256X:$src))), (v4i64 VR256X:$src)>;
64 def : Pat<(v4i64 (bitconvert (v32i8 VR256X:$src))), (v4i64 VR256X:$src)>;
65 def : Pat<(v4i64 (bitconvert (v16i16 VR256X:$src))), (v4i64 VR256X:$src)>;
66 def : Pat<(v32i8 (bitconvert (v4f64 VR256X:$src))), (v32i8 VR256X:$src)>;
67 def : Pat<(v32i8 (bitconvert (v4i64 VR256X:$src))), (v32i8 VR256X:$src)>;
68 def : Pat<(v32i8 (bitconvert (v8f32 VR256X:$src))), (v32i8 VR256X:$src)>;
69 def : Pat<(v32i8 (bitconvert (v8i32 VR256X:$src))), (v32i8 VR256X:$src)>;
70 def : Pat<(v32i8 (bitconvert (v16i16 VR256X:$src))), (v32i8 VR256X:$src)>;
71 def : Pat<(v8i32 (bitconvert (v32i8 VR256X:$src))), (v8i32 VR256X:$src)>;
72 def : Pat<(v8i32 (bitconvert (v16i16 VR256X:$src))), (v8i32 VR256X:$src)>;
73 def : Pat<(v8i32 (bitconvert (v8f32 VR256X:$src))), (v8i32 VR256X:$src)>;
74 def : Pat<(v8i32 (bitconvert (v4i64 VR256X:$src))), (v8i32 VR256X:$src)>;
75 def : Pat<(v8i32 (bitconvert (v4f64 VR256X:$src))), (v8i32 VR256X:$src)>;
76 def : Pat<(v16i16 (bitconvert (v8f32 VR256X:$src))), (v16i16 VR256X:$src)>;
77 def : Pat<(v16i16 (bitconvert (v8i32 VR256X:$src))), (v16i16 VR256X:$src)>;
78 def : Pat<(v16i16 (bitconvert (v4i64 VR256X:$src))), (v16i16 VR256X:$src)>;
79 def : Pat<(v16i16 (bitconvert (v4f64 VR256X:$src))), (v16i16 VR256X:$src)>;
80 def : Pat<(v16i16 (bitconvert (v32i8 VR256X:$src))), (v16i16 VR256X:$src)>;
84 // AVX-512: VPXOR instruction writes zero to its upper part, it's safe build zeros.
// Pseudo-instruction producing an all-zeros ZMM register; it carries the
// rematerializable/cheap-as-move flags so the register allocator can
// recreate zeros instead of spilling them. Expanded post-isel — presumably
// to a VPXOR-style idiom per the comment above; confirm in the expansion code.
87 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
88 isPseudo = 1, Predicates = [HasAVX512] in {
89 def AVX512_512_SET0 : I<0, Pseudo, (outs VR512:$dst), (ins), "",
90 [(set VR512:$dst, (v16f32 immAllZerosV))]>;
// The pseudo is defined on v16f32; reuse it for the other 512-bit zero
// vector types rather than defining separate pseudos.
93 let Predicates = [HasAVX512] in {
94 def : Pat<(v8i64 immAllZerosV), (AVX512_512_SET0)>;
95 def : Pat<(v16i32 immAllZerosV), (AVX512_512_SET0)>;
96 def : Pat<(v8f64 immAllZerosV), (AVX512_512_SET0)>;
99 //===----------------------------------------------------------------------===//
100 // AVX-512 - VECTOR INSERT
// Instruction definitions for inserting a 128-bit (…32x4) or 256-bit (…64x4)
// subvector into a ZMM register at an immediate-selected position. All defs
// carry empty pattern lists ([]); instruction selection is driven by the
// standalone Pat<> defs that follow this section.
103 let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
104 def VINSERTF32x4rr : AVX512AIi8<0x18, MRMSrcReg, (outs VR512:$dst),
105 (ins VR512:$src1, VR128X:$src2, i8imm:$src3),
106 "vinsertf32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
107 []>, EVEX_4V, EVEX_V512;
109 def VINSERTF32x4rm : AVX512AIi8<0x18, MRMSrcMem, (outs VR512:$dst),
110 (ins VR512:$src1, f128mem:$src2, i8imm:$src3),
111 "vinsertf32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
112 []>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VT4>;
115 // -- 64x4 fp form --
116 let hasSideEffects = 0, ExeDomain = SSEPackedDouble in {
117 def VINSERTF64x4rr : AVX512AIi8<0x1a, MRMSrcReg, (outs VR512:$dst),
118 (ins VR512:$src1, VR256X:$src2, i8imm:$src3),
119 "vinsertf64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
120 []>, EVEX_4V, EVEX_V512, VEX_W;
122 def VINSERTF64x4rm : AVX512AIi8<0x1a, MRMSrcMem, (outs VR512:$dst),
123 (ins VR512:$src1, i256mem:$src2, i8imm:$src3),
124 "vinsertf64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
125 []>, EVEX_4V, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
127 // -- 32x4 integer form --
128 let hasSideEffects = 0 in {
129 def VINSERTI32x4rr : AVX512AIi8<0x38, MRMSrcReg, (outs VR512:$dst),
130 (ins VR512:$src1, VR128X:$src2, i8imm:$src3),
131 "vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
132 []>, EVEX_4V, EVEX_V512;
134 def VINSERTI32x4rm : AVX512AIi8<0x38, MRMSrcMem, (outs VR512:$dst),
135 (ins VR512:$src1, i128mem:$src2, i8imm:$src3),
136 "vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
137 []>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VT4>;
// -- 64x4 integer form --
141 let hasSideEffects = 0 in {
143 def VINSERTI64x4rr : AVX512AIi8<0x3a, MRMSrcReg, (outs VR512:$dst),
144 (ins VR512:$src1, VR256X:$src2, i8imm:$src3),
145 "vinserti64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
146 []>, EVEX_4V, EVEX_V512, VEX_W;
148 def VINSERTI64x4rm : AVX512AIi8<0x3a, MRMSrcMem, (outs VR512:$dst),
149 (ins VR512:$src1, i256mem:$src2, i8imm:$src3),
150 "vinserti64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
151 []>, EVEX_4V, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
// Register-form subvector-insert selection patterns. Note that the 128-bit
// inserts into 64-bit-element vectors (v8f64/v8i64) reuse the 32x4
// instructions — no 64x2 insert instruction is defined in this section.
154 def : Pat<(vinsert128_insert:$ins (v16f32 VR512:$src1), (v4f32 VR128X:$src2),
155 (iPTR imm)), (VINSERTF32x4rr VR512:$src1, VR128X:$src2,
156 (INSERT_get_vinsert128_imm VR512:$ins))>;
157 def : Pat<(vinsert128_insert:$ins (v8f64 VR512:$src1), (v2f64 VR128X:$src2),
158 (iPTR imm)), (VINSERTF32x4rr VR512:$src1, VR128X:$src2,
159 (INSERT_get_vinsert128_imm VR512:$ins))>;
160 def : Pat<(vinsert128_insert:$ins (v8i64 VR512:$src1), (v2i64 VR128X:$src2),
161 (iPTR imm)), (VINSERTI32x4rr VR512:$src1, VR128X:$src2,
162 (INSERT_get_vinsert128_imm VR512:$ins))>;
163 def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1), (v4i32 VR128X:$src2),
164 (iPTR imm)), (VINSERTI32x4rr VR512:$src1, VR128X:$src2,
165 (INSERT_get_vinsert128_imm VR512:$ins))>;
// Memory-form 128-bit inserts: fold the subvector load into the rm variant.
167 def : Pat<(vinsert128_insert:$ins (v16f32 VR512:$src1), (loadv4f32 addr:$src2),
168 (iPTR imm)), (VINSERTF32x4rm VR512:$src1, addr:$src2,
169 (INSERT_get_vinsert128_imm VR512:$ins))>;
170 def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1),
171 (bc_v4i32 (loadv2i64 addr:$src2)),
172 (iPTR imm)), (VINSERTI32x4rm VR512:$src1, addr:$src2,
173 (INSERT_get_vinsert128_imm VR512:$ins))>;
174 def : Pat<(vinsert128_insert:$ins (v8f64 VR512:$src1), (loadv2f64 addr:$src2),
175 (iPTR imm)), (VINSERTF32x4rm VR512:$src1, addr:$src2,
176 (INSERT_get_vinsert128_imm VR512:$ins))>;
177 def : Pat<(vinsert128_insert:$ins (v8i64 VR512:$src1), (loadv2i64 addr:$src2),
178 (iPTR imm)), (VINSERTI32x4rm VR512:$src1, addr:$src2,
179 (INSERT_get_vinsert128_imm VR512:$ins))>;
// Register-form 256-bit inserts (FP).
181 def : Pat<(vinsert256_insert:$ins (v16f32 VR512:$src1), (v8f32 VR256X:$src2),
182 (iPTR imm)), (VINSERTF64x4rr VR512:$src1, VR256X:$src2,
183 (INSERT_get_vinsert256_imm VR512:$ins))>;
184 def : Pat<(vinsert256_insert:$ins (v8f64 VR512:$src1), (v4f64 VR256X:$src2),
185 (iPTR imm)), (VINSERTF64x4rr VR512:$src1, VR256X:$src2,
186 (INSERT_get_vinsert256_imm VR512:$ins))>;
// Integer 256-bit subvector inserts. These must use the vinsert256_insert
// fragment: the original used vinsert128_insert, which is inconsistent with
// the VINSERTI64x4rr output and the INSERT_get_vinsert256_imm index
// converter (the immediate would be computed for 128-bit lane granularity).
// This also matches the FP vinsert256_insert patterns directly above.
187 def : Pat<(vinsert256_insert:$ins (v8i64 VR512:$src1), (v4i64 VR256X:$src2),
188 (iPTR imm)), (VINSERTI64x4rr VR512:$src1, VR256X:$src2,
189 (INSERT_get_vinsert256_imm VR512:$ins))>;
190 def : Pat<(vinsert256_insert:$ins (v16i32 VR512:$src1), (v8i32 VR256X:$src2),
191 (iPTR imm)), (VINSERTI64x4rr VR512:$src1, VR256X:$src2,
192 (INSERT_get_vinsert256_imm VR512:$ins))>;
// Memory-form 256-bit subvector inserts: fold the load into the rm variant.
194 def : Pat<(vinsert256_insert:$ins (v16f32 VR512:$src1), (loadv8f32 addr:$src2),
195 (iPTR imm)), (VINSERTF64x4rm VR512:$src1, addr:$src2,
196 (INSERT_get_vinsert256_imm VR512:$ins))>;
197 def : Pat<(vinsert256_insert:$ins (v8f64 VR512:$src1), (loadv4f64 addr:$src2),
198 (iPTR imm)), (VINSERTF64x4rm VR512:$src1, addr:$src2,
199 (INSERT_get_vinsert256_imm VR512:$ins))>;
200 def : Pat<(vinsert256_insert:$ins (v8i64 VR512:$src1), (loadv4i64 addr:$src2),
201 (iPTR imm)), (VINSERTI64x4rm VR512:$src1, addr:$src2,
202 (INSERT_get_vinsert256_imm VR512:$ins))>;
203 def : Pat<(vinsert256_insert:$ins (v16i32 VR512:$src1),
204 (bc_v8i32 (loadv4i64 addr:$src2)),
205 (iPTR imm)), (VINSERTI64x4rm VR512:$src1, addr:$src2,
206 (INSERT_get_vinsert256_imm VR512:$ins))>;
208 // vinsertps - insert f32 to XMM
// EVEX-encoded vinsertps; selected directly via the X86insertps node, with
// the rm form folding a scalar f32 load via scalar_to_vector.
209 def VINSERTPSzrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
210 (ins VR128X:$src1, VR128X:$src2, u32u8imm:$src3),
211 "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
212 [(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
214 def VINSERTPSzrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
215 (ins VR128X:$src1, f32mem:$src2, u32u8imm:$src3),
216 "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
217 [(set VR128X:$dst, (X86insertps VR128X:$src1,
218 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
219 imm:$src3))]>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
221 //===----------------------------------------------------------------------===//
222 // AVX-512 VECTOR EXTRACT
// Instruction definitions for extracting a 128-bit (…32x4) or 256-bit
// (…64x4) subvector from a ZMM register, to a register (rr) or directly to
// memory (mr). Pattern lists are empty; selection uses the Pat<> defs below.
224 let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
226 def VEXTRACTF32x4rr : AVX512AIi8<0x19, MRMDestReg, (outs VR128X:$dst),
227 (ins VR512:$src1, i8imm:$src2),
228 "vextractf32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
229 []>, EVEX, EVEX_V512;
230 def VEXTRACTF32x4mr : AVX512AIi8<0x19, MRMDestMem, (outs),
231 (ins f128mem:$dst, VR512:$src1, i8imm:$src2),
232 "vextractf32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
233 []>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VT4>;
236 def VEXTRACTF64x4rr : AVX512AIi8<0x1b, MRMDestReg, (outs VR256X:$dst),
237 (ins VR512:$src1, i8imm:$src2),
238 "vextractf64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
239 []>, EVEX, EVEX_V512, VEX_W;
241 def VEXTRACTF64x4mr : AVX512AIi8<0x1b, MRMDestMem, (outs),
242 (ins f256mem:$dst, VR512:$src1, i8imm:$src2),
243 "vextractf64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
244 []>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
// Integer-domain variants.
247 let hasSideEffects = 0 in {
249 def VEXTRACTI32x4rr : AVX512AIi8<0x39, MRMDestReg, (outs VR128X:$dst),
250 (ins VR512:$src1, i8imm:$src2),
251 "vextracti32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
252 []>, EVEX, EVEX_V512;
253 def VEXTRACTI32x4mr : AVX512AIi8<0x39, MRMDestMem, (outs),
254 (ins i128mem:$dst, VR512:$src1, i8imm:$src2),
255 "vextracti32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
256 []>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VT4>;
259 def VEXTRACTI64x4rr : AVX512AIi8<0x3b, MRMDestReg, (outs VR256X:$dst),
260 (ins VR512:$src1, i8imm:$src2),
261 "vextracti64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
262 []>, EVEX, EVEX_V512, VEX_W;
264 def VEXTRACTI64x4mr : AVX512AIi8<0x3b, MRMDestMem, (outs),
265 (ins i256mem:$dst, VR512:$src1, i8imm:$src2),
266 "vextracti64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
267 []>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
// 128-bit subvector extracts from a 512-bit source. The v16i32 pattern now
// names its source type explicitly (matching its three siblings — the
// original left the VR512 operand untyped) and selects the integer-domain
// VEXTRACTI32x4rr, consistent with the v8i64 pattern below.
270 def : Pat<(vextract128_extract:$ext (v16f32 VR512:$src1), (iPTR imm)),
271 (v4f32 (VEXTRACTF32x4rr VR512:$src1,
272 (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
274 def : Pat<(vextract128_extract:$ext (v16i32 VR512:$src1), (iPTR imm)),
275 (v4i32 (VEXTRACTI32x4rr VR512:$src1,
276 (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
// NOTE(review): v8f64 reuses the 32x4 FP extract — no 64x2 extract
// instruction is defined in this section.
278 def : Pat<(vextract128_extract:$ext (v8f64 VR512:$src1), (iPTR imm)),
279 (v2f64 (VEXTRACTF32x4rr VR512:$src1,
280 (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
282 def : Pat<(vextract128_extract:$ext (v8i64 VR512:$src1), (iPTR imm)),
283 (v2i64 (VEXTRACTI32x4rr VR512:$src1,
284 (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
// 256-bit subvector extracts, using the 64x4 instructions in the matching
// FP/integer domain.
287 def : Pat<(vextract256_extract:$ext (v16f32 VR512:$src1), (iPTR imm)),
288 (v8f32 (VEXTRACTF64x4rr VR512:$src1,
289 (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
291 def : Pat<(vextract256_extract:$ext (v16i32 VR512:$src1), (iPTR imm)),
292 (v8i32 (VEXTRACTI64x4rr VR512:$src1,
293 (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
295 def : Pat<(vextract256_extract:$ext (v8f64 VR512:$src1), (iPTR imm)),
296 (v4f64 (VEXTRACTF64x4rr VR512:$src1,
297 (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
299 def : Pat<(vextract256_extract:$ext (v8i64 VR512:$src1), (iPTR imm)),
300 (v4i64 (VEXTRACTI64x4rr VR512:$src1,
301 (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
303 // A 256-bit subvector extract from the first 512-bit vector position
304 // is a subregister copy that needs no instruction.
305 def : Pat<(v8i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
306 (v8i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm))>;
307 def : Pat<(v8f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
308 (v8f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm))>;
309 def : Pat<(v4i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
310 (v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm))>;
311 def : Pat<(v4f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
312 (v4f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm))>;
// Likewise for 128-bit extracts from the low position (sub_xmm copy).
315 def : Pat<(v4i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
316 (v4i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm))>;
317 def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
318 (v2i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm))>;
319 def : Pat<(v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
320 (v2f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;
321 def : Pat<(v4f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
322 (v4f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
325 // A 128-bit subvector insert to the first 512-bit vector position
326 // is a subregister copy that needs no instruction.
// (Built as two nested INSERT_SUBREGs: xmm into an undef ymm, then that ymm
// into an undef zmm.)
327 def : Pat<(insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)),
328 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
329 (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
331 def : Pat<(insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0)),
332 (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
333 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
335 def : Pat<(insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0)),
336 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
337 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
339 def : Pat<(insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0)),
340 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
341 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
// 256-bit inserts to the low position are a single sub_ymm copy.
344 def : Pat<(insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0)),
345 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
346 def : Pat<(insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0)),
347 (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
348 def : Pat<(insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0)),
349 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
350 def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
351 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
353 // vextractps - extract 32 bits from XMM
// Extracts the imm-selected 32-bit element of an XMM to a GPR (rr) or
// straight to memory (mr); the v4f32 source is viewed as v4i32 via bc_v4i32.
354 def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
355 (ins VR128X:$src1, u32u8imm:$src2),
356 "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
357 [(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
360 def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
361 (ins f32mem:$dst, VR128X:$src1, u32u8imm:$src2),
362 "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
363 [(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
364 addr:$dst)]>, EVEX, EVEX_CD8<32, CD8VT1>;
366 //===---------------------------------------------------------------------===//
// FP broadcast: replicate a scalar (register or memory) across all lanes of
// a 512-bit vector. The multiclass only declares the rr/rm instruction
// shells; the load-broadcast and intrinsic selection is done by the Pat<>
// defs below.
369 multiclass avx512_fp_broadcast<bits<8> opc, string OpcodeStr,
370 RegisterClass DestRC,
371 RegisterClass SrcRC, X86MemOperand x86memop> {
372 def rr : AVX5128I<opc, MRMSrcReg, (outs DestRC:$dst), (ins SrcRC:$src),
373 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
375 def rm : AVX5128I<opc, MRMSrcMem, (outs DestRC:$dst), (ins x86memop:$src),
376 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),[]>, EVEX;
378 let ExeDomain = SSEPackedSingle in {
379 defm VBROADCASTSSZ : avx512_fp_broadcast<0x18, "vbroadcastss", VR512,
381 EVEX_V512, EVEX_CD8<32, CD8VT1>;
384 let ExeDomain = SSEPackedDouble in {
385 defm VBROADCASTSDZ : avx512_fp_broadcast<0x19, "vbroadcastsd", VR512,
387 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
// Select memory broadcasts for the generic X86VBroadcast node and for the
// explicit AVX-512 broadcast intrinsics.
390 def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))),
391 (VBROADCASTSSZrm addr:$src)>;
392 def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))),
393 (VBROADCASTSDZrm addr:$src)>;
395 def : Pat<(int_x86_avx512_vbroadcast_ss_512 addr:$src),
396 (VBROADCASTSSZrm addr:$src)>;
397 def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
398 (VBROADCASTSDZrm addr:$src)>;
// Integer broadcast from a GPR: plain (Zrr) and zero-masked (Zkrr) forms.
// The defm prefixes (VPBROADCASTDr/VPBROADCASTQr) combine with the Zrr/Zkrr
// suffixes to give the VPBROADCASTDrZrr etc. names used in the patterns below.
400 multiclass avx512_int_broadcast_reg<bits<8> opc, string OpcodeStr,
401 RegisterClass SrcRC, RegisterClass KRC> {
402 def Zrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins SrcRC:$src),
403 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
404 []>, EVEX, EVEX_V512;
405 def Zkrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst),
406 (ins KRC:$mask, SrcRC:$src),
407 !strconcat(OpcodeStr,
408 " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
409 []>, EVEX, EVEX_V512, EVEX_KZ;
412 defm VPBROADCASTDr : avx512_int_broadcast_reg<0x7C, "vpbroadcastd", GR32, VK16WM>;
413 defm VPBROADCASTQr : avx512_int_broadcast_reg<0x7C, "vpbroadcastq", GR64, VK8WM>,
// Mask zero-extension: broadcast the constant 1 under the mask, so set mask
// bits become 1 and cleared bits become 0.
416 def : Pat <(v16i32 (X86vzext VK16WM:$mask)),
417 (VPBROADCASTDrZkrr VK16WM:$mask, (i32 (MOV32ri 0x1)))>;
419 def : Pat <(v8i64 (X86vzext VK8WM:$mask)),
420 (VPBROADCASTQrZkrr VK8WM:$mask, (i64 (MOV64ri 0x1)))>;
422 def : Pat<(v16i32 (X86VBroadcast (i32 GR32:$src))),
423 (VPBROADCASTDrZrr GR32:$src)>;
424 def : Pat<(v16i32 (X86VBroadcastm VK16WM:$mask, (i32 GR32:$src))),
425 (VPBROADCASTDrZkrr VK16WM:$mask, GR32:$src)>;
426 def : Pat<(v8i64 (X86VBroadcast (i64 GR64:$src))),
427 (VPBROADCASTQrZrr GR64:$src)>;
428 def : Pat<(v8i64 (X86VBroadcastm VK8WM:$mask, (i64 GR64:$src))),
429 (VPBROADCASTQrZkrr VK8WM:$mask, GR64:$src)>;
431 def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_i32_512 (i32 GR32:$src))),
432 (VPBROADCASTDrZrr GR32:$src)>;
433 def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_i64_512 (i64 GR64:$src))),
434 (VPBROADCASTQrZrr GR64:$src)>;
// Masked-intrinsic forms: only the zero-passthru case (immAllZerosV) is
// handled here; the GPR mask is copied into the matching mask register class.
436 def : Pat<(v16i32 (int_x86_avx512_mask_pbroadcast_d_gpr_512 (i32 GR32:$src),
437 (v16i32 immAllZerosV), (i16 GR16:$mask))),
438 (VPBROADCASTDrZkrr (COPY_TO_REGCLASS GR16:$mask, VK16WM), GR32:$src)>;
439 def : Pat<(v8i64 (int_x86_avx512_mask_pbroadcast_q_gpr_512 (i64 GR64:$src),
440 (bc_v8i64 (v16i32 immAllZerosV)), (i8 GR8:$mask))),
441 (VPBROADCASTQrZkrr (COPY_TO_REGCLASS GR8:$mask, VK8WM), GR64:$src)>;
// Integer broadcast from an XMM register or memory, with plain (rr/rm) and
// zero-masked (krr/krm) variants, selected via X86VBroadcast/X86VBroadcastm.
443 multiclass avx512_int_broadcast_rm<bits<8> opc, string OpcodeStr,
444 X86MemOperand x86memop, PatFrag ld_frag,
445 RegisterClass DstRC, ValueType OpVT, ValueType SrcVT,
447 def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins VR128X:$src),
448 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
450 (OpVT (X86VBroadcast (SrcVT VR128X:$src))))]>, EVEX;
451 def krr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins KRC:$mask,
453 !strconcat(OpcodeStr,
454 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
456 (OpVT (X86VBroadcastm KRC:$mask, (SrcVT VR128X:$src))))]>,
459 def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
460 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
462 (OpVT (X86VBroadcast (ld_frag addr:$src))))]>, EVEX;
463 def krm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins KRC:$mask,
465 !strconcat(OpcodeStr,
466 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
467 [(set DstRC:$dst, (OpVT (X86VBroadcastm KRC:$mask,
468 (ld_frag addr:$src))))]>, EVEX, EVEX_KZ;
472 defm VPBROADCASTDZ : avx512_int_broadcast_rm<0x58, "vpbroadcastd", i32mem,
473 loadi32, VR512, v16i32, v4i32, VK16WM>,
474 EVEX_V512, EVEX_CD8<32, CD8VT1>;
475 defm VPBROADCASTQZ : avx512_int_broadcast_rm<0x59, "vpbroadcastq", i64mem,
476 loadi64, VR512, v8i64, v2i64, VK8WM>, EVEX_V512, VEX_W,
477 EVEX_CD8<64, CD8VT1>;
// Broadcast of a whole 128/256-bit subvector from memory across the ZMM.
479 multiclass avx512_int_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
480 X86MemOperand x86memop, PatFrag ld_frag,
483 def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins x86memop:$src),
484 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
486 def krm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins KRC:$mask,
488 !strconcat(OpcodeStr,
489 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
494 defm VBROADCASTI32X4 : avx512_int_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
495 i128mem, loadv2i64, VK16WM>,
496 EVEX_V512, EVEX_CD8<32, CD8VT4>;
// NOTE(review): VBROADCASTI64X4 is instantiated with VK16WM although its
// 64-bit element count suggests an 8-lane mask — confirm whether VK8WM was
// intended here.
497 defm VBROADCASTI64X4 : avx512_int_subvec_broadcast_rm<0x5b, "vbroadcasti64x4",
498 i256mem, loadv4i64, VK16WM>, VEX_W,
499 EVEX_V512, EVEX_CD8<64, CD8VT4>;
// Map the explicit broadcast intrinsics and register-source X86VBroadcast
// nodes onto the rr broadcast instructions defined above.
501 def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_512 (v4i32 VR128X:$src))),
502 (VPBROADCASTDZrr VR128X:$src)>;
503 def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_512 (v2i64 VR128X:$src))),
504 (VPBROADCASTQZrr VR128X:$src)>;
506 def : Pat<(v16f32 (X86VBroadcast (v4f32 VR128X:$src))),
507 (VBROADCASTSSZrr VR128X:$src)>;
508 def : Pat<(v8f64 (X86VBroadcast (v2f64 VR128X:$src))),
509 (VBROADCASTSDZrr VR128X:$src)>;
511 def : Pat<(v16f32 (int_x86_avx512_vbroadcast_ss_ps_512 (v4f32 VR128X:$src))),
512 (VBROADCASTSSZrr VR128X:$src)>;
513 def : Pat<(v8f64 (int_x86_avx512_vbroadcast_sd_pd_512 (v2f64 VR128X:$src))),
514 (VBROADCASTSDZrr VR128X:$src)>;
516 // Provide fallback in case the load node that is used in the patterns above
517 // is used by additional users, which prevents the pattern selection.
518 def : Pat<(v16f32 (X86VBroadcast FR32X:$src)),
519 (VBROADCASTSSZrr (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
520 def : Pat<(v8f64 (X86VBroadcast FR64X:$src)),
521 (VBROADCASTSDZrr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
// 256-bit masked load-broadcast: performed as a 512-bit masked broadcast
// (the v8i1 mask widened to VK16WM), then the low ymm is extracted.
524 let Predicates = [HasAVX512] in {
525 def : Pat<(v8i32 (X86VBroadcastm (v8i1 VK8WM:$mask), (loadi32 addr:$src))),
527 (v16i32 (VPBROADCASTDZkrm (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
528 addr:$src)), sub_ymm)>;
530 //===----------------------------------------------------------------------===//
531 // AVX-512 BROADCAST MASK TO VECTOR REGISTER
// Requires the CDI feature (see Predicates below): each mask bit is expanded
// across the corresponding vector element.
534 multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
535 RegisterClass DstRC, RegisterClass KRC,
536 ValueType OpVT, ValueType SrcVT> {
537 def rr : AVX512XS8I<opc, MRMDestReg, (outs DstRC:$dst), (ins KRC:$src),
538 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
542 let Predicates = [HasCDI] in {
543 defm VPBROADCASTMW2D : avx512_mask_broadcast<0x3A, "vpbroadcastmw2d", VR512,
544 VK16, v16i32, v16i1>, EVEX_V512;
545 defm VPBROADCASTMB2Q : avx512_mask_broadcast<0x2A, "vpbroadcastmb2q", VR512,
546 VK8, v8i64, v8i1>, EVEX_V512, VEX_W;
549 //===----------------------------------------------------------------------===//
// AVX-512 permute instructions: immediate-controlled (X86VPermi) and
// register-controlled (X86VPermv) forms.
552 // -- immediate form --
553 multiclass avx512_perm_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
554 SDNode OpNode, PatFrag mem_frag,
555 X86MemOperand x86memop, ValueType OpVT> {
556 def ri : AVX512AIi8<opc, MRMSrcReg, (outs RC:$dst),
557 (ins RC:$src1, i8imm:$src2),
558 !strconcat(OpcodeStr,
559 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
561 (OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
563 def mi : AVX512AIi8<opc, MRMSrcMem, (outs RC:$dst),
564 (ins x86memop:$src1, i8imm:$src2),
565 !strconcat(OpcodeStr,
566 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
568 (OpVT (OpNode (mem_frag addr:$src1),
569 (i8 imm:$src2))))]>, EVEX;
// Note: the VPERMQZ/VPERMPDZ prefixes are reused by the register-form defms
// below; the record names stay distinct via the ri/mi vs. rr/rm suffixes.
572 defm VPERMQZ : avx512_perm_imm<0x00, "vpermq", VR512, X86VPermi, memopv8i64,
573 i512mem, v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
574 let ExeDomain = SSEPackedDouble in
575 defm VPERMPDZ : avx512_perm_imm<0x01, "vpermpd", VR512, X86VPermi, memopv8f64,
576 f512mem, v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
578 // -- VPERM - register form --
579 multiclass avx512_perm<bits<8> opc, string OpcodeStr, RegisterClass RC,
580 PatFrag mem_frag, X86MemOperand x86memop, ValueType OpVT> {
582 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
583 (ins RC:$src1, RC:$src2),
584 !strconcat(OpcodeStr,
585 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
587 (OpVT (X86VPermv RC:$src1, RC:$src2)))]>, EVEX_4V;
589 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
590 (ins RC:$src1, x86memop:$src2),
591 !strconcat(OpcodeStr,
592 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
594 (OpVT (X86VPermv RC:$src1, (mem_frag addr:$src2))))]>,
598 defm VPERMDZ : avx512_perm<0x36, "vpermd", VR512, memopv16i32, i512mem,
599 v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
600 defm VPERMQZ : avx512_perm<0x36, "vpermq", VR512, memopv8i64, i512mem,
601 v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
602 let ExeDomain = SSEPackedSingle in
603 defm VPERMPSZ : avx512_perm<0x16, "vpermps", VR512, memopv16f32, f512mem,
604 v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
605 let ExeDomain = SSEPackedDouble in
606 defm VPERMPDZ : avx512_perm<0x16, "vpermpd", VR512, memopv8f64, f512mem,
607 v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
609 // -- VPERM2I - 3 source operands form --
// Three-operand permutes (VPERMI2*/VPERMT2*): $src1 is tied to $dst
// (Constraints below), so the assembly string shows only $src2/$src3.
610 multiclass avx512_perm_3src<bits<8> opc, string OpcodeStr, RegisterClass RC,
611 PatFrag mem_frag, X86MemOperand x86memop,
612 SDNode OpNode, ValueType OpVT> {
613 let Constraints = "$src1 = $dst" in {
614 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
615 (ins RC:$src1, RC:$src2, RC:$src3),
616 !strconcat(OpcodeStr,
617 " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
619 (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>,
622 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
623 (ins RC:$src1, RC:$src2, x86memop:$src3),
624 !strconcat(OpcodeStr,
625 " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
627 (OpVT (OpNode RC:$src1, RC:$src2,
628 (mem_frag addr:$src3))))]>, EVEX_4V;
631 defm VPERMI2D : avx512_perm_3src<0x76, "vpermi2d", VR512, memopv16i32, i512mem,
632 X86VPermiv3, v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
633 defm VPERMI2Q : avx512_perm_3src<0x76, "vpermi2q", VR512, memopv8i64, i512mem,
634 X86VPermiv3, v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
635 defm VPERMI2PS : avx512_perm_3src<0x77, "vpermi2ps", VR512, memopv16f32, i512mem,
636 X86VPermiv3, v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
637 defm VPERMI2PD : avx512_perm_3src<0x77, "vpermi2pd", VR512, memopv8f64, i512mem,
638 X86VPermiv3, v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
640 defm VPERMT2D : avx512_perm_3src<0x7E, "vpermt2d", VR512, memopv16i32, i512mem,
641 X86VPermv3, v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
642 defm VPERMT2Q : avx512_perm_3src<0x7E, "vpermt2q", VR512, memopv8i64, i512mem,
643 X86VPermv3, v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
644 defm VPERMT2PS : avx512_perm_3src<0x7F, "vpermt2ps", VR512, memopv16f32, i512mem,
645 X86VPermv3, v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
646 defm VPERMT2PD : avx512_perm_3src<0x7F, "vpermt2pd", VR512, memopv8f64, i512mem,
647 X86VPermv3, v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// Intrinsic patterns for the unmasked case only (mask operand == -1, i.e.
// all lanes enabled); the intrinsic lists the index vector first, while the
// instruction takes the tied data operand first.
649 def : Pat<(v16f32 (int_x86_avx512_mask_vpermt_ps_512 (v16i32 VR512:$idx),
650 (v16f32 VR512:$src1), (v16f32 VR512:$src2), (i16 -1))),
651 (VPERMT2PSrr VR512:$src1, VR512:$idx, VR512:$src2)>;
653 def : Pat<(v16i32 (int_x86_avx512_mask_vpermt_d_512 (v16i32 VR512:$idx),
654 (v16i32 VR512:$src1), (v16i32 VR512:$src2), (i16 -1))),
655 (VPERMT2Drr VR512:$src1, VR512:$idx, VR512:$src2)>;
657 def : Pat<(v8f64 (int_x86_avx512_mask_vpermt_pd_512 (v8i64 VR512:$idx),
658 (v8f64 VR512:$src1), (v8f64 VR512:$src2), (i8 -1))),
659 (VPERMT2PDrr VR512:$src1, VR512:$idx, VR512:$src2)>;
661 def : Pat<(v8i64 (int_x86_avx512_mask_vpermt_q_512 (v8i64 VR512:$idx),
662 (v8i64 VR512:$src1), (v8i64 VR512:$src2), (i8 -1))),
663 (VPERMT2Qrr VR512:$src1, VR512:$idx, VR512:$src2)>;
664 //===----------------------------------------------------------------------===//
665 // AVX-512 - BLEND using mask
667 multiclass avx512_blendmask<bits<8> opc, string OpcodeStr,
668 RegisterClass KRC, RegisterClass RC,
669 X86MemOperand x86memop, PatFrag mem_frag,
670 SDNode OpNode, ValueType vt> {
671 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
672 (ins KRC:$mask, RC:$src1, RC:$src2),
673 !strconcat(OpcodeStr,
674 " \t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
// NOTE(review): the pattern feeds (vt RC:$src2) before (vt RC:$src1) into
// OpNode — i.e. the vselect "mask-set" value comes from $src2. The FP and
// integer defms below both rely on this ordering; confirm against the
// blendm instruction semantics before changing it.
675 [(set RC:$dst, (OpNode KRC:$mask, (vt RC:$src2),
676 (vt RC:$src1)))]>, EVEX_4V, EVEX_K;
// Memory form carries no pattern; folding is not modeled here.
678 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
679 (ins KRC:$mask, RC:$src1, x86memop:$src2),
680 !strconcat(OpcodeStr,
681 " \t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
682 []>, EVEX_4V, EVEX_K;
685 let ExeDomain = SSEPackedSingle in
686 defm VBLENDMPSZ : avx512_blendmask<0x65, "vblendmps",
687 VK16WM, VR512, f512mem,
688 memopv16f32, vselect, v16f32>,
689 EVEX_CD8<32, CD8VF>, EVEX_V512;
690 let ExeDomain = SSEPackedDouble in
691 defm VBLENDMPDZ : avx512_blendmask<0x65, "vblendmpd",
692 VK8WM, VR512, f512mem,
693 memopv8f64, vselect, v8f64>,
694 VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
// Intrinsic patterns: the GPR mask is moved into the instructions' declared
// mask register class (VK16WM / VK8WM).
696 def : Pat<(v16f32 (int_x86_avx512_mask_blend_ps_512 (v16f32 VR512:$src1),
697 (v16f32 VR512:$src2), (i16 GR16:$mask))),
698 (VBLENDMPSZrr (COPY_TO_REGCLASS GR16:$mask, VK16WM),
699 VR512:$src1, VR512:$src2)>;
701 def : Pat<(v8f64 (int_x86_avx512_mask_blend_pd_512 (v8f64 VR512:$src1),
702 (v8f64 VR512:$src2), (i8 GR8:$mask))),
703 (VBLENDMPDZrr (COPY_TO_REGCLASS GR8:$mask, VK8WM),
704 VR512:$src1, VR512:$src2)>;
706 defm VPBLENDMDZ : avx512_blendmask<0x64, "vpblendmd",
707 VK16WM, VR512, f512mem,
708 memopv16i32, vselect, v16i32>,
709 EVEX_CD8<32, CD8VF>, EVEX_V512;
711 defm VPBLENDMQZ : avx512_blendmask<0x64, "vpblendmq",
712 VK8WM, VR512, f512mem,
713 memopv8i64, vselect, v8i64>,
714 VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
// Integer masked-blend intrinsics.  Copy the GPR mask into the *write-mask*
// register classes VK16WM/VK8WM (not VK16/VK8): the blendm instructions were
// instantiated with KRC = VK16WM/VK8WM above, and the WM classes exclude K0,
// which cannot be encoded as a write mask.  This also matches the FP blend
// patterns (mask_blend_ps/pd_512) earlier in this section.
716 def : Pat<(v16i32 (int_x86_avx512_mask_blend_d_512 (v16i32 VR512:$src1),
717 (v16i32 VR512:$src2), (i16 GR16:$mask))),
718 (VPBLENDMDZrr (COPY_TO_REGCLASS GR16:$mask, VK16WM),
719 VR512:$src1, VR512:$src2)>;
721 def : Pat<(v8i64 (int_x86_avx512_mask_blend_q_512 (v8i64 VR512:$src1),
722 (v8i64 VR512:$src2), (i8 GR8:$mask))),
723 (VPBLENDMQZrr (COPY_TO_REGCLASS GR8:$mask, VK8WM),
724 VR512:$src1, VR512:$src2)>;
// 256-bit vselect has no native AVX-512 instruction here, so widen both
// vector operands to 512 bits with SUBREG_TO_REG, blend, and extract the
// low ymm.  Operand order ($src2 first) matches the swapped pattern in
// avx512_blendmask's rr form.
726 let Predicates = [HasAVX512] in {
727 def : Pat<(v8f32 (vselect (v8i1 VK8WM:$mask), (v8f32 VR256X:$src1),
728 (v8f32 VR256X:$src2))),
730 (v16f32 (VBLENDMPSZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
731 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
732 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
734 def : Pat<(v8i32 (vselect (v8i1 VK8WM:$mask), (v8i32 VR256X:$src1),
735 (v8i32 VR256X:$src2))),
737 (v16i32 (VPBLENDMDZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
738 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
739 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
741 //===----------------------------------------------------------------------===//
742 // Compare Instructions
743 //===----------------------------------------------------------------------===//
745 // avx512_cmp_scalar - AVX512 CMPSS and CMPSD
// Scalar FP compares producing a single mask bit (VK1).  The *_alt forms
// accept an explicit 8-bit immediate comparison code instead of the mnemonic
// suffix; they are assembler-only (no patterns, no side effects).
746 multiclass avx512_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
747 Operand CC, SDNode OpNode, ValueType VT,
748 PatFrag ld_frag, string asm, string asm_alt> {
749 def rr : AVX512Ii8<0xC2, MRMSrcReg,
750 (outs VK1:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
751 [(set VK1:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))],
752 IIC_SSE_ALU_F32S_RR>, EVEX_4V;
753 def rm : AVX512Ii8<0xC2, MRMSrcMem,
754 (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
755 [(set VK1:$dst, (OpNode (VT RC:$src1),
756 (ld_frag addr:$src2), imm:$cc))], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
757 let isAsmParserOnly = 1, hasSideEffects = 0 in {
758 def rri_alt : AVX512Ii8<0xC2, MRMSrcReg,
759 (outs VK1:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
760 asm_alt, [], IIC_SSE_ALU_F32S_RR>, EVEX_4V;
761 def rmi_alt : AVX512Ii8<0xC2, MRMSrcMem,
762 (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
763 asm_alt, [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
767 let Predicates = [HasAVX512] in {
768 defm VCMPSSZ : avx512_cmp_scalar<FR32X, f32mem, AVXCC, X86cmpms, f32, loadf32,
769 "vcmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
770 "vcmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
772 defm VCMPSDZ : avx512_cmp_scalar<FR64X, f64mem, AVXCC, X86cmpms, f64, loadf64,
773 "vcmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
774 "vcmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
// Packed integer equality/greater-than compares writing a kreg mask.
778 multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, RegisterClass KRC,
779 RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
780 SDNode OpNode, ValueType vt> {
781 def rr : AVX512BI<opc, MRMSrcReg,
782 (outs KRC:$dst), (ins RC:$src1, RC:$src2),
783 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
784 [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2)))],
785 IIC_SSE_ALU_F32P_RR>, EVEX_4V;
786 def rm : AVX512BI<opc, MRMSrcMem,
787 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2),
788 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
789 [(set KRC:$dst, (OpNode (vt RC:$src1), (memop_frag addr:$src2)))],
790 IIC_SSE_ALU_F32P_RM>, EVEX_4V;
793 defm VPCMPEQDZ : avx512_icmp_packed<0x76, "vpcmpeqd", VK16, VR512, i512mem,
794 memopv16i32, X86pcmpeqm, v16i32>, EVEX_V512,
796 defm VPCMPEQQZ : avx512_icmp_packed<0x29, "vpcmpeqq", VK8, VR512, i512mem,
797 memopv8i64, X86pcmpeqm, v8i64>, T8PD, EVEX_V512,
798 VEX_W, EVEX_CD8<64, CD8VF>;
800 defm VPCMPGTDZ : avx512_icmp_packed<0x66, "vpcmpgtd", VK16, VR512, i512mem,
801 memopv16i32, X86pcmpgtm, v16i32>, EVEX_V512,
803 defm VPCMPGTQZ : avx512_icmp_packed<0x37, "vpcmpgtq", VK8, VR512, i512mem,
804 memopv8i64, X86pcmpgtm, v8i64>, T8PD, EVEX_V512,
805 VEX_W, EVEX_CD8<64, CD8VF>;
// 256-bit integer mask compares: widen to 512 bits, compare, then narrow
// the 16-bit result mask to VK8 (the upper 8 lanes compare garbage but are
// discarded by the regclass copy).
807 def : Pat<(v8i1 (X86pcmpgtm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
808 (COPY_TO_REGCLASS (VPCMPGTDZrr
809 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
810 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
812 def : Pat<(v8i1 (X86pcmpeqm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
813 (COPY_TO_REGCLASS (VPCMPEQDZrr
814 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
815 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
// Packed integer compares taking an explicit comparison-code operand
// (vpcmp{cc}d / vpcmp{cc}ud / ...).  Signed compares use X86cmpm, unsigned
// use X86cmpmu.  The *_alt defs are assembler-only immediate-cc forms.
817 multiclass avx512_icmp_cc<bits<8> opc, RegisterClass KRC,
818 RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
819 SDNode OpNode, ValueType vt, Operand CC, string asm,
821 def rri : AVX512AIi8<opc, MRMSrcReg,
822 (outs KRC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
823 [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2), imm:$cc))],
824 IIC_SSE_ALU_F32P_RR>, EVEX_4V;
825 def rmi : AVX512AIi8<opc, MRMSrcMem,
826 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
827 [(set KRC:$dst, (OpNode (vt RC:$src1), (memop_frag addr:$src2),
828 imm:$cc))], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
829 // Accept explicit immediate argument form instead of comparison code.
830 let isAsmParserOnly = 1, hasSideEffects = 0 in {
831 def rri_alt : AVX512AIi8<opc, MRMSrcReg,
832 (outs KRC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
833 asm_alt, [], IIC_SSE_ALU_F32P_RR>, EVEX_4V;
834 def rmi_alt : AVX512AIi8<opc, MRMSrcMem,
835 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
836 asm_alt, [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
840 defm VPCMPDZ : avx512_icmp_cc<0x1F, VK16, VR512, i512mem, memopv16i32,
841 X86cmpm, v16i32, AVXCC,
842 "vpcmp${cc}d\t{$src2, $src1, $dst|$dst, $src1, $src2}",
843 "vpcmpd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
844 EVEX_V512, EVEX_CD8<32, CD8VF>;
845 defm VPCMPUDZ : avx512_icmp_cc<0x1E, VK16, VR512, i512mem, memopv16i32,
846 X86cmpmu, v16i32, AVXCC,
847 "vpcmp${cc}ud\t{$src2, $src1, $dst|$dst, $src1, $src2}",
848 "vpcmpud\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
849 EVEX_V512, EVEX_CD8<32, CD8VF>;
851 defm VPCMPQZ : avx512_icmp_cc<0x1F, VK8, VR512, i512mem, memopv8i64,
852 X86cmpm, v8i64, AVXCC,
853 "vpcmp${cc}q\t{$src2, $src1, $dst|$dst, $src1, $src2}",
854 "vpcmpq\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
855 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
856 defm VPCMPUQZ : avx512_icmp_cc<0x1E, VK8, VR512, i512mem, memopv8i64,
857 X86cmpmu, v8i64, AVXCC,
858 "vpcmp${cc}uq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
859 "vpcmpuq\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
860 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
862 // avx512_cmp_packed - compare packed instructions
// Packed FP compares (vcmpps/vcmppd) writing a kreg mask; rrib is the
// suppress-all-exceptions ({sae}) broadcast-round form.
863 multiclass avx512_cmp_packed<RegisterClass KRC, RegisterClass RC,
864 X86MemOperand x86memop, ValueType vt,
865 string suffix, Domain d> {
866 def rri : AVX512PIi8<0xC2, MRMSrcReg,
867 (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
868 !strconcat("vcmp${cc}", suffix,
869 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
870 [(set KRC:$dst, (X86cmpm (vt RC:$src1), (vt RC:$src2), imm:$cc))], d>;
871 def rrib: AVX512PIi8<0xC2, MRMSrcReg,
872 (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
873 !strconcat("vcmp${cc}", suffix,
874 " \t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
// NOTE(review): rmi uses the generic `memop` PatFrag rather than a
// vt-specific fragment like the other multiclasses — confirm intended.
// Also the AT&T side of its asm string omits $cc while the Intel side has
// it — verify against the assembler tests.
876 def rmi : AVX512PIi8<0xC2, MRMSrcMem,
877 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, AVXCC:$cc),
878 !strconcat("vcmp${cc}", suffix,
879 " \t{$src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
881 (X86cmpm (vt RC:$src1), (memop addr:$src2), imm:$cc))], d>;
883 // Accept explicit immediate argument form instead of comparison code.
884 let isAsmParserOnly = 1, hasSideEffects = 0 in {
885 def rri_alt : AVX512PIi8<0xC2, MRMSrcReg,
886 (outs KRC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
887 !strconcat("vcmp", suffix,
888 " \t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
889 def rmi_alt : AVX512PIi8<0xC2, MRMSrcMem,
890 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
891 !strconcat("vcmp", suffix,
892 " \t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
896 defm VCMPPSZ : avx512_cmp_packed<VK16, VR512, f512mem, v16f32,
897 "ps", SSEPackedSingle>, PS, EVEX_4V, EVEX_V512,
899 defm VCMPPDZ : avx512_cmp_packed<VK8, VR512, f512mem, v8f64,
900 "pd", SSEPackedDouble>, PD, EVEX_4V, VEX_W, EVEX_V512,
// 256-bit compares: widen to zmm, compare, narrow the mask to VK8.
903 def : Pat<(v8i1 (X86cmpm (v8f32 VR256X:$src1), (v8f32 VR256X:$src2), imm:$cc)),
904 (COPY_TO_REGCLASS (VCMPPSZrri
905 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
906 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
908 def : Pat<(v8i1 (X86cmpm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
909 (COPY_TO_REGCLASS (VPCMPDZrri
910 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
911 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
913 def : Pat<(v8i1 (X86cmpmu (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
914 (COPY_TO_REGCLASS (VPCMPUDZrri
915 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
916 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
// Unmasked cmp_ps/pd intrinsics: the first pair selects the {sae} (rrib)
// form, the second the normal rri form; both move the resulting kreg into
// a GPR.  The distinguishing rounding-mode argument lines fall outside
// this excerpt.
919 def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
920 (v16f32 VR512:$src2), imm:$cc, (i16 -1),
922 (COPY_TO_REGCLASS (VCMPPSZrrib VR512:$src1, VR512:$src2,
923 (I8Imm imm:$cc)), GR16)>;
925 def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
926 (v8f64 VR512:$src2), imm:$cc, (i8 -1),
928 (COPY_TO_REGCLASS (VCMPPDZrrib VR512:$src1, VR512:$src2,
929 (I8Imm imm:$cc)), GR8)>;
931 def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
932 (v16f32 VR512:$src2), imm:$cc, (i16 -1),
934 (COPY_TO_REGCLASS (VCMPPSZrri VR512:$src1, VR512:$src2,
935 (I8Imm imm:$cc)), GR16)>;
937 def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
938 (v8f64 VR512:$src2), imm:$cc, (i8 -1),
940 (COPY_TO_REGCLASS (VCMPPDZrri VR512:$src1, VR512:$src2,
941 (I8Imm imm:$cc)), GR8)>;
943 // Mask register copy, including
944 // - copy between mask registers
945 // - load/store mask registers
946 // - copy from GPR to mask register and vice versa
// kk = kreg<-kreg, km = kreg<-mem (with load pattern), mk = mem<-kreg.
948 multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
949 string OpcodeStr, RegisterClass KRC,
950 ValueType vt, X86MemOperand x86memop> {
951 let hasSideEffects = 0 in {
952 def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
953 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
955 def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
956 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
957 [(set KRC:$dst, (vt (load addr:$src)))]>;
959 def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
960 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
// kr = kreg<-GPR, rk = GPR<-kreg (no patterns; used via COPY_TO_REGCLASS).
964 multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
966 RegisterClass KRC, RegisterClass GRC> {
967 let hasSideEffects = 0 in {
968 def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src),
969 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
970 def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src),
971 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
975 let Predicates = [HasAVX512] in {
976 defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem>,
978 defm KMOVW : avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
982 let Predicates = [HasAVX512] in {
983 // GR16 from/to 16-bit mask
984 def : Pat<(v16i1 (bitconvert (i16 GR16:$src))),
985 (KMOVWkr (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit))>;
986 def : Pat<(i16 (bitconvert (v16i1 VK16:$src))),
987 (EXTRACT_SUBREG (KMOVWrk VK16:$src), sub_16bit)>;
989 // Store kreg in memory
990 def : Pat<(store (v16i1 VK16:$src), addr:$dst),
991 (KMOVWmk addr:$dst, VK16:$src)>;
993 def : Pat<(store VK8:$src, addr:$dst),
994 (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK8:$src, VK16))>;
// Loads of narrow masks go through the 16-bit kmovw then narrow the class.
996 def : Pat<(i1 (load addr:$src)),
997 (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK1)>;
999 def : Pat<(v8i1 (load addr:$src)),
1000 (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK8)>;
// trunc-to-i1: isolate bit 0 in a GPR, then move it into a kreg.
1002 def : Pat<(i1 (trunc (i32 GR32:$src))),
1003 (COPY_TO_REGCLASS (KMOVWkr (AND32ri $src, (i32 1))), VK1)>;
1005 def : Pat<(i1 (trunc (i8 GR8:$src))),
1007 (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit), (i32 1))),
1009 def : Pat<(i1 (trunc (i16 GR16:$src))),
1011 (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), $src, sub_16bit), (i32 1))),
// zext of a mask bit: read the kreg into a GPR and mask to bit 0.
1014 def : Pat<(i32 (zext VK1:$src)),
1015 (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1))>;
1016 def : Pat<(i8 (zext VK1:$src)),
1019 (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)), sub_8bit)>;
1020 def : Pat<(i64 (zext VK1:$src)),
1021 (AND64ri8 (SUBREG_TO_REG (i64 0),
1022 (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), sub_32bit), (i64 1))>;
1023 def : Pat<(i16 (zext VK1:$src)),
1025 (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)),
// scalar_to_vector of a mask bit is just a regclass change.
1027 def : Pat<(v16i1 (scalar_to_vector VK1:$src)),
1028 (COPY_TO_REGCLASS VK1:$src, VK16)>;
1029 def : Pat<(v8i1 (scalar_to_vector VK1:$src)),
1030 (COPY_TO_REGCLASS VK1:$src, VK8)>;
1032 // With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
1033 let Predicates = [HasAVX512] in {
1034 // GR from/to 8-bit mask without native support
1035 def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
1037 (KMOVWkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
1039 def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
1041 (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
// Extracting element 0 of a mask vector is a no-op regclass change.
1044 def : Pat<(i1 (X86Vextract VK16:$src, (iPTR 0))),
1045 (COPY_TO_REGCLASS VK16:$src, VK1)>;
1046 def : Pat<(i1 (X86Vextract VK8:$src, (iPTR 0))),
1047 (COPY_TO_REGCLASS VK8:$src, VK1)>;
1051 // Mask unary operation
// Generic kreg unary op (currently only KNOT) plus the word-size
// instantiation helper and the intrinsic / DAG-level patterns.
1053 multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr,
1054 RegisterClass KRC, SDPatternOperator OpNode> {
1055 let Predicates = [HasAVX512] in
1056 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
1057 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
1058 [(set KRC:$dst, (OpNode KRC:$src))]>;
1061 multiclass avx512_mask_unop_w<bits<8> opc, string OpcodeStr,
1062 SDPatternOperator OpNode> {
1063 defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
1067 defm KNOT : avx512_mask_unop_w<0x44, "knot", not>;
// Map the GR16-based knot intrinsic onto the kreg instruction.
1069 multiclass avx512_mask_unop_int<string IntName, string InstName> {
1070 let Predicates = [HasAVX512] in
1071 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
1073 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
1074 (v16i1 (COPY_TO_REGCLASS GR16:$src, VK16))), GR16)>;
1076 defm : avx512_mask_unop_int<"knot", "KNOT">;
// xor-with-all-ones and `not` of masks both select KNOTW, widening v8i1
// through VK16 as needed.
1078 def : Pat<(xor VK16:$src1, (v16i1 immAllOnesV)), (KNOTWrr VK16:$src1)>;
1079 def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)),
1080 (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$src1, VK16)), VK8)>;
1082 // With AVX-512, 8-bit mask is promoted to 16-bit mask.
1083 def : Pat<(not VK8:$src),
1085 (KNOTWrr (COPY_TO_REGCLASS VK8:$src, VK16)), VK8)>;
1087 // Mask binary operation
1088 // - KAND, KANDN, KOR, KXNOR, KXOR
1089 multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr,
1090 RegisterClass KRC, SDPatternOperator OpNode> {
1091 let Predicates = [HasAVX512] in
1092 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
1093 !strconcat(OpcodeStr,
1094 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1095 [(set KRC:$dst, (OpNode KRC:$src1, KRC:$src2))]>;
1098 multiclass avx512_mask_binop_w<bits<8> opc, string OpcodeStr,
1099 SDPatternOperator OpNode> {
1100 defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
// PatFrags for the two non-primitive kreg ops: andn = ~a & b,
// xnor = ~(a ^ b).
1104 def andn : PatFrag<(ops node:$i0, node:$i1), (and (not node:$i0), node:$i1)>;
1105 def xnor : PatFrag<(ops node:$i0, node:$i1), (not (xor node:$i0, node:$i1))>;
// KANDN is the lone non-commutative op, hence the nested isCommutable = 0.
1107 let isCommutable = 1 in {
1108 defm KAND : avx512_mask_binop_w<0x41, "kand", and>;
1109 let isCommutable = 0 in
1110 defm KANDN : avx512_mask_binop_w<0x42, "kandn", andn>;
1111 defm KOR : avx512_mask_binop_w<0x45, "kor", or>;
1112 defm KXNOR : avx512_mask_binop_w<0x46, "kxnor", xnor>;
1113 defm KXOR : avx512_mask_binop_w<0x47, "kxor", xor>;
// Single-bit (VK1) logic: widen both operands to VK16, run the word-size
// kreg instruction, narrow the result back.
1116 def : Pat<(xor VK1:$src1, VK1:$src2),
1117 (COPY_TO_REGCLASS (KXORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1118 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
1120 def : Pat<(or VK1:$src1, VK1:$src2),
1121 (COPY_TO_REGCLASS (KORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1122 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
1124 def : Pat<(and VK1:$src1, VK1:$src2),
1125 (COPY_TO_REGCLASS (KANDWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1126 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
// GR16-based binop intrinsics route through the kreg instructions.
1128 multiclass avx512_mask_binop_int<string IntName, string InstName> {
1129 let Predicates = [HasAVX512] in
1130 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
1131 (i16 GR16:$src1), (i16 GR16:$src2)),
1132 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
1133 (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
1134 (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
1137 defm : avx512_mask_binop_int<"kand", "KAND">;
1138 defm : avx512_mask_binop_int<"kandn", "KANDN">;
1139 defm : avx512_mask_binop_int<"kor", "KOR">;
1140 defm : avx512_mask_binop_int<"kxnor", "KXNOR">;
1141 defm : avx512_mask_binop_int<"kxor", "KXOR">;
1143 // With AVX-512, 8-bit mask is promoted to 16-bit mask.
1144 multiclass avx512_binop_pat<SDPatternOperator OpNode, Instruction Inst> {
1145 let Predicates = [HasAVX512] in
1146 def : Pat<(OpNode VK8:$src1, VK8:$src2),
1148 (Inst (COPY_TO_REGCLASS VK8:$src1, VK16),
1149 (COPY_TO_REGCLASS VK8:$src2, VK16)), VK8)>;
1152 defm : avx512_binop_pat<and, KANDWrr>;
1153 defm : avx512_binop_pat<andn, KANDNWrr>;
1154 defm : avx512_binop_pat<or, KORWrr>;
1155 defm : avx512_binop_pat<xnor, KXNORWrr>;
1156 defm : avx512_binop_pat<xor, KXORWrr>;
// Mask unpack (kunpckbw): no ISel pattern on the instruction itself;
// selection happens via the concat_vectors pattern below.
1159 multiclass avx512_mask_unpck<bits<8> opc, string OpcodeStr,
1160 RegisterClass KRC> {
1161 let Predicates = [HasAVX512] in
1162 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
1163 !strconcat(OpcodeStr,
1164 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1167 multiclass avx512_mask_unpck_bw<bits<8> opc, string OpcodeStr> {
1168 defm BW : avx512_mask_unpck<opc, !strconcat(OpcodeStr, "bw"), VK16>,
1172 defm KUNPCK : avx512_mask_unpck_bw<0x4b, "kunpck">;
// concat of two v8i1 masks: note the operand swap — $src2 is passed as the
// instruction's first operand.
1173 def : Pat<(v16i1 (concat_vectors (v8i1 VK8:$src1), (v8i1 VK8:$src2))),
1174 (KUNPCKBWrr (COPY_TO_REGCLASS VK8:$src2, VK16),
1175 (COPY_TO_REGCLASS VK8:$src1, VK16))>;
1178 multiclass avx512_mask_unpck_int<string IntName, string InstName> {
1179 let Predicates = [HasAVX512] in
1180 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_bw")
1181 (i16 GR16:$src1), (i16 GR16:$src2)),
1182 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"BWrr")
1183 (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
1184 (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
1186 defm : avx512_mask_unpck_int<"kunpck", "KUNPCK">;
// Mask test (kortestw): ORs the sources and sets EFLAGS; no register result.
1189 multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
1191 let Predicates = [HasAVX512], Defs = [EFLAGS] in
1192 def rr : I<opc, MRMSrcReg, (outs), (ins KRC:$src1, KRC:$src2),
1193 !strconcat(OpcodeStr, " \t{$src2, $src1|$src1, $src2}"),
1194 [(set EFLAGS, (OpNode KRC:$src1, KRC:$src2))]>;
1197 multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1198 defm W : avx512_mask_testop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
1202 defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>;
// Compare a single mask bit against zero by kortest-ing it with itself.
1204 def : Pat<(X86cmp VK1:$src1, (i1 0)),
1205 (KORTESTWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1206 (COPY_TO_REGCLASS VK1:$src1, VK16))>;
// Mask shift by immediate (kshiftl/kshiftr).
1209 multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
1211 let Predicates = [HasAVX512] in
1212 def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, i8imm:$imm),
1213 !strconcat(OpcodeStr,
1214 " \t{$imm, $src, $dst|$dst, $src, $imm}"),
1215 [(set KRC:$dst, (OpNode KRC:$src, (i8 imm:$imm)))]>;
1218 multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
1220 defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
1224 defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", X86vshli>;
1225 defm KSHIFTR : avx512_mask_shiftop_w<0x30, 0x31, "kshiftr", X86vsrli>;
1227 // Mask setting all 0s or 1s
// Pseudo-instructions (expanded later) so the all-zero/all-one masks are
// rematerializable and as cheap as a move.
1228 multiclass avx512_mask_setop<RegisterClass KRC, ValueType VT, PatFrag Val> {
1229 let Predicates = [HasAVX512] in
1230 let isReMaterializable = 1, isAsCheapAsAMove = 1, isPseudo = 1 in
1231 def #NAME# : I<0, Pseudo, (outs KRC:$dst), (ins), "",
1232 [(set KRC:$dst, (VT Val))]>;
1235 multiclass avx512_mask_setop_w<PatFrag Val> {
1236 defm B : avx512_mask_setop<VK8, v8i1, Val>;
1237 defm W : avx512_mask_setop<VK16, v16i1, Val>;
1240 defm KSET0 : avx512_mask_setop_w<immAllZerosV>;
1241 defm KSET1 : avx512_mask_setop_w<immAllOnesV>;
1243 // With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
1244 let Predicates = [HasAVX512] in {
1245 def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
1246 def : Pat<(v8i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK8)>;
1247 def : Pat<(i1 0), (COPY_TO_REGCLASS (KSET0W), VK1)>;
1248 def : Pat<(i1 1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
1249 def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
// Subvector extract/insert between v8i1 and v16i1: the low half is a pure
// regclass change; the high half needs a kshiftr by 8 first.
1251 def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
1252 (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;
1254 def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
1255 (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>;
1257 def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
1258 (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
// v8i1 shifts are widened to the 16-bit kshift instructions.
1260 def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
1261 (v8i1 (COPY_TO_REGCLASS (KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;
1263 def : Pat<(v8i1 (X86vsrli VK8:$src, (i8 imm:$imm))),
1264 (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;
1265 //===----------------------------------------------------------------------===//
1266 // AVX-512 - Aligned and unaligned load and store
// avx512_load: unmasked (rr/rm), merge-masked (rrk/rmk, {k}) and
// zero-masked (rrkz/rmkz, {k}{z}) load/move forms.  Only the plain rm form
// carries an ISel pattern; the masked forms are matched by separate Pats.
1269 multiclass avx512_load<bits<8> opc, RegisterClass RC, RegisterClass KRC,
1270 X86MemOperand x86memop, PatFrag ld_frag,
1271 string asm, Domain d,
1272 ValueType vt, bit IsReMaterializable = 1> {
1273 let hasSideEffects = 0 in {
1274 def rr : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
1275 !strconcat(asm, " \t{$src, $dst|$dst, $src}"), [], d>,
1277 def rrkz : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
1279 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
1280 [], d>, EVEX, EVEX_KZ;
1282 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
1283 def rm : AVX512PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
1284 !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
1285 [(set (vt RC:$dst), (ld_frag addr:$src))], d>, EVEX;
// Merge-masking ties the pass-through value to $dst via the constraint.
1286 let Constraints = "$src1 = $dst", hasSideEffects = 0 in {
1287 def rrk : AVX512PI<opc, MRMSrcReg, (outs RC:$dst),
1288 (ins RC:$src1, KRC:$mask, RC:$src2),
1290 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"), [], d>,
1293 def rmk : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
1294 (ins RC:$src1, KRC:$mask, x86memop:$src2),
1296 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
1297 [], d>, EVEX, EVEX_K;
1300 def rmkz : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
1301 (ins KRC:$mask, x86memop:$src2),
1303 " \t{$src2, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src2}"),
1304 [], d>, EVEX, EVEX_KZ;
// avx512_store: the store direction.  The *_alt register forms exist only
// for the assembler (reverse-encoded moves); mr carries the store pattern.
1307 multiclass avx512_store<bits<8> opc, RegisterClass RC, RegisterClass KRC,
1308 X86MemOperand x86memop, PatFrag store_frag,
1309 string asm, Domain d, ValueType vt> {
1310 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1311 def rr_alt : AVX512PI<opc, MRMDestReg, (outs RC:$dst), (ins RC:$src),
1312 !strconcat(asm, " \t{$src, $dst|$dst, $src}"), [], d>,
1314 let Constraints = "$src1 = $dst" in
1315 def alt_rrk : AVX512PI<opc, MRMDestReg, (outs RC:$dst),
1316 (ins RC:$src1, KRC:$mask, RC:$src2),
1318 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"), [], d>,
1320 def alt_rrkz : AVX512PI<opc, MRMDestReg, (outs RC:$dst),
1321 (ins KRC:$mask, RC:$src),
1323 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
1324 [], d>, EVEX, EVEX_KZ;
1326 let mayStore = 1 in {
1327 def mr : AVX512PI<opc, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
1328 !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
1329 [(store_frag (vt RC:$src), addr:$dst)], d>, EVEX;
1330 def mrk : AVX512PI<opc, MRMDestMem, (outs),
1331 (ins x86memop:$dst, KRC:$mask, RC:$src),
1333 " \t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
1334 [], d>, EVEX, EVEX_K;
1335 def mrkz : AVX512PI<opc, MRMDestMem, (outs),
1336 (ins x86memop:$dst, KRC:$mask, RC:$src),
1338 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
1339 [], d>, EVEX, EVEX_KZ;
// 512-bit FP moves: aligned (vmovaps/vmovapd) and unaligned (vmovups/
// vmovupd), each combining the load and store multiclasses.
1343 defm VMOVAPSZ : avx512_load<0x28, VR512, VK16WM, f512mem, alignedloadv16f32,
1344 "vmovaps", SSEPackedSingle, v16f32>,
1345 avx512_store<0x29, VR512, VK16WM, f512mem, alignedstore512,
1346 "vmovaps", SSEPackedSingle, v16f32>,
1347 PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
1348 defm VMOVAPDZ : avx512_load<0x28, VR512, VK8WM, f512mem, alignedloadv8f64,
1349 "vmovapd", SSEPackedDouble, v8f64>,
1350 avx512_store<0x29, VR512, VK8WM, f512mem, alignedstore512,
1351 "vmovapd", SSEPackedDouble, v8f64>,
1352 PD, EVEX_V512, VEX_W,
1353 EVEX_CD8<64, CD8VF>;
1354 defm VMOVUPSZ : avx512_load<0x10, VR512, VK16WM, f512mem, loadv16f32,
1355 "vmovups", SSEPackedSingle, v16f32>,
1356 avx512_store<0x11, VR512, VK16WM, f512mem, store,
1357 "vmovups", SSEPackedSingle, v16f32>,
1358 PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
// vmovupd load is marked not-rematerializable (trailing 0).
1359 defm VMOVUPDZ : avx512_load<0x10, VR512, VK8WM, f512mem, loadv8f64,
1360 "vmovupd", SSEPackedDouble, v8f64, 0>,
1361 avx512_store<0x11, VR512, VK8WM, f512mem, store,
1362 "vmovupd", SSEPackedDouble, v8f64>,
1363 PD, EVEX_V512, VEX_W,
1364 EVEX_CD8<64, CD8VF>;
// Masked-load intrinsics with a zero pass-through select the zero-masking
// (rmkz) forms.
1365 def: Pat<(v8f64 (int_x86_avx512_mask_loadu_pd_512 addr:$ptr,
1366 (bc_v8f64 (v16i32 immAllZerosV)), GR8:$mask)),
1367 (VMOVUPDZrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
1369 def: Pat<(v16f32 (int_x86_avx512_mask_loadu_ps_512 addr:$ptr,
1370 (bc_v16f32 (v16i32 immAllZerosV)), GR16:$mask)),
1371 (VMOVUPSZrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
// Masked-store intrinsics select the masked-store (mrk) forms.
1373 def: Pat<(int_x86_avx512_mask_storeu_ps_512 addr:$ptr, (v16f32 VR512:$src),
1375 (VMOVUPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
1377 def: Pat<(int_x86_avx512_mask_storeu_pd_512 addr:$ptr, (v8f64 VR512:$src),
1379 (VMOVUPDZmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
// 512-bit integer moves, aligned and unaligned.
1382 defm VMOVDQA32: avx512_load<0x6F, VR512, VK16WM, i512mem, alignedloadv16i32,
1383 "vmovdqa32", SSEPackedInt, v16i32>,
1384 avx512_store<0x7F, VR512, VK16WM, i512mem, alignedstore512,
1385 "vmovdqa32", SSEPackedInt, v16i32>,
1386 PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
1387 defm VMOVDQA64: avx512_load<0x6F, VR512, VK8WM, i512mem, alignedloadv8i64,
1388 "vmovdqa64", SSEPackedInt, v8i64>,
1389 avx512_store<0x7F, VR512, VK8WM, i512mem, alignedstore512,
1390 "vmovdqa64", SSEPackedInt, v8i64>,
1391 PD, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
1392 defm VMOVDQU32: avx512_load<0x6F, VR512, VK16WM, i512mem, load,
1393 "vmovdqu32", SSEPackedInt, v16i32>,
1394 avx512_store<0x7F, VR512, VK16WM, i512mem, store,
1395 "vmovdqu32", SSEPackedInt, v16i32>,
1396 XS, EVEX_V512, EVEX_CD8<32, CD8VF>;
1397 defm VMOVDQU64: avx512_load<0x6F, VR512, VK8WM, i512mem, load,
1398 "vmovdqu64", SSEPackedInt, v8i64>,
1399 avx512_store<0x7F, VR512, VK8WM, i512mem, store,
1400 "vmovdqu64", SSEPackedInt, v8i64>,
1401 XS, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
// Integer masked load/store intrinsics, mirroring the FP patterns above.
1403 def: Pat<(v16i32 (int_x86_avx512_mask_loadu_d_512 addr:$ptr,
1404 (v16i32 immAllZerosV), GR16:$mask)),
1405 (VMOVDQU32rmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
1407 def: Pat<(v8i64 (int_x86_avx512_mask_loadu_q_512 addr:$ptr,
1408 (bc_v8i64 (v16i32 immAllZerosV)), GR8:$mask)),
1409 (VMOVDQU64rmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
1411 def: Pat<(int_x86_avx512_mask_storeu_d_512 addr:$ptr, (v16i32 VR512:$src),
1413 (VMOVDQU32mrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
1415 def: Pat<(int_x86_avx512_mask_storeu_q_512 addr:$ptr, (v8i64 VR512:$src),
1417 (VMOVDQU64mrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
// vselect against zero/self selects the zero-masking (rrkz) or
// merge-masking (rrk) register moves.  AddedComplexity prefers these over
// the generic blend patterns.
1420 let AddedComplexity = 20 in {
1421 def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src),
1422 (bc_v8i64 (v16i32 immAllZerosV)))),
1423 (VMOVDQU64rrkz VK8WM:$mask, VR512:$src)>;
// Inverted select (zeros on the true side): negate the mask with KNOTW.
// NOTE(review): the output references VK8:$mask while the pattern binds
// VK8WM:$mask — looks like a class-annotation mismatch; confirm TableGen
// accepts/intends this.
1425 def : Pat<(v8i64 (vselect VK8WM:$mask, (bc_v8i64 (v16i32 immAllZerosV)),
1426 (v8i64 VR512:$src))),
1427 (VMOVDQU64rrkz (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$mask, VK16)),
1430 def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 VR512:$src),
1431 (v16i32 immAllZerosV))),
1432 (VMOVDQU32rrkz VK16WM:$mask, VR512:$src)>;
1434 def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 immAllZerosV),
1435 (v16i32 VR512:$src))),
1436 (VMOVDQU32rrkz (KNOTWrr VK16WM:$mask), VR512:$src)>;
// General two-source vselect: merge-masked move with $src2 as the tied
// pass-through operand and $src1 as the masked-in value.
1438 def : Pat<(v16f32 (vselect VK16WM:$mask, (v16f32 VR512:$src1),
1439 (v16f32 VR512:$src2))),
1440 (VMOVUPSZrrk VR512:$src2, VK16WM:$mask, VR512:$src1)>;
1441 def : Pat<(v8f64 (vselect VK8WM:$mask, (v8f64 VR512:$src1),
1442 (v8f64 VR512:$src2))),
1443 (VMOVUPDZrrk VR512:$src2, VK8WM:$mask, VR512:$src1)>;
1444 def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 VR512:$src1),
1445 (v16i32 VR512:$src2))),
1446 (VMOVDQU32rrk VR512:$src2, VK16WM:$mask, VR512:$src1)>;
1447 def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src1),
1448 (v8i64 VR512:$src2))),
1449 (VMOVDQU64rrk VR512:$src2, VK8WM:$mask, VR512:$src1)>;
1451 // Move Int Doubleword to Packed Double Int
// GPR -> XMM scalar insertions (vmovd/vmovq).
1453 def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
1454 "vmovd\t{$src, $dst|$dst, $src}",
1456 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
1458 def VMOVDI2PDIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src),
1459 "vmovd\t{$src, $dst|$dst, $src}",
1461 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
1462 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
1463 def VMOV64toPQIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$src),
1464 "vmovq\t{$src, $dst|$dst, $src}",
1466 (v2i64 (scalar_to_vector GR64:$src)))],
1467 IIC_SSE_MOVDQ>, EVEX, VEX_W, VEX_LIG;
// Bitcast-only GPR<->FP moves exist solely for codegen, not the assembler.
1468 let isCodeGenOnly = 1 in {
1469 def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
1470 "vmovq\t{$src, $dst|$dst, $src}",
1471 [(set FR64:$dst, (bitconvert GR64:$src))],
1472 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
1473 def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
1474 "vmovq\t{$src, $dst|$dst, $src}",
1475 [(set GR64:$dst, (bitconvert FR64:$src))],
1476 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
1478 def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
1479 "vmovq\t{$src, $dst|$dst, $src}",
1480 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
1481 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteStore]>,
1482 EVEX_CD8<64, CD8VT1>;
1484 // Move Int Doubleword to Single Scalar
1486 let isCodeGenOnly = 1 in {
1487 def VMOVDI2SSZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src),
1488 "vmovd\t{$src, $dst|$dst, $src}",
1489 [(set FR32X:$dst, (bitconvert GR32:$src))],
1490 IIC_SSE_MOVDQ>, EVEX, VEX_LIG;
1492 def VMOVDI2SSZrm : AVX512BI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src),
1493 "vmovd\t{$src, $dst|$dst, $src}",
1494 [(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))],
1495 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
1498 // Move doubleword from xmm register to r/m32
// XMM element 0 -> GPR/memory extractions.
1500 def VMOVPDI2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128X:$src),
1501 "vmovd\t{$src, $dst|$dst, $src}",
1502 [(set GR32:$dst, (vector_extract (v4i32 VR128X:$src),
1503 (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
1505 def VMOVPDI2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
1506 (ins i32mem:$dst, VR128X:$src),
1507 "vmovd\t{$src, $dst|$dst, $src}",
1508 [(store (i32 (vector_extract (v4i32 VR128X:$src),
1509 (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
1510 EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
1512 // Move quadword from xmm1 register to r/m64
// 64-bit extracts require 64-bit mode.
1514 def VMOVPQIto64Zrr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128X:$src),
1515 "vmovq\t{$src, $dst|$dst, $src}",
1516 [(set GR64:$dst, (extractelt (v2i64 VR128X:$src),
1518 IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_LIG, VEX_W,
1519 Requires<[HasAVX512, In64BitMode]>;
1521 def VMOVPQIto64Zmr : I<0xD6, MRMDestMem, (outs),
1522 (ins i64mem:$dst, VR128X:$src),
1523 "vmovq\t{$src, $dst|$dst, $src}",
1524 [(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
1525 addr:$dst)], IIC_SSE_MOVDQ>,
1526 EVEX, PD, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>,
1527 Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>;
1529 // Move Scalar Single to Double Int
1531 let isCodeGenOnly = 1 in {
1532 def VMOVSS2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst),
1534 "vmovd\t{$src, $dst|$dst, $src}",
1535 [(set GR32:$dst, (bitconvert FR32X:$src))],
1536 IIC_SSE_MOVD_ToGP>, EVEX, VEX_LIG;
1537 def VMOVSS2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
1538 (ins i32mem:$dst, FR32X:$src),
1539 "vmovd\t{$src, $dst|$dst, $src}",
1540 [(store (i32 (bitconvert FR32X:$src)), addr:$dst)],
1541 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
1544 // Move Quadword Int to Packed Quadword Int
1546 def VMOVQI2PQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
1548 "vmovq\t{$src, $dst|$dst, $src}",
1550 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
1551 EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
1553 //===----------------------------------------------------------------------===//
1554 // AVX-512 MOVSS, MOVSD
1555 //===----------------------------------------------------------------------===//
// Common skeleton for the scalar move instructions.  Parameters:
//   asm      - mnemonic ("movss"/"movsd")
//   RC       - scalar FP register class (FR32X/FR64X)
//   OpNode   - DAG node matched by the rr form (X86Movss/X86Movsd)
//   vt       - packed vector type of the 128-bit destination
//   x86memop - scalar memory operand, mem_pat - its load fragment
// Defines four forms: rr (merge scalar into low element), rrk (k-masked rr,
// no ISel pattern), rm (scalar load), mr (scalar store).
// NOTE(review): lines 1569/1575/1579/1581 of the original are absent from
// this extraction (the rrk !strconcat opener, trailing EVEX attribute lines
// and a closing brace) — confirm upstream before modifying.
1557 multiclass avx512_move_scalar <string asm, RegisterClass RC,
1558 SDNode OpNode, ValueType vt,
1559 X86MemOperand x86memop, PatFrag mem_pat> {
1560 let hasSideEffects = 0 in {
1561 def rr : SI<0x10, MRMSrcReg, (outs VR128X:$dst), (ins VR128X:$src1, RC:$src2),
1562 !strconcat(asm, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1563 [(set VR128X:$dst, (vt (OpNode VR128X:$src1,
1564 (scalar_to_vector RC:$src2))))],
1565 IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG;
1566 let Constraints = "$src1 = $dst" in
1567 def rrk : SI<0x10, MRMSrcReg, (outs VR128X:$dst),
1568 (ins VR128X:$src1, VK1WM:$mask, RC:$src2, RC:$src3),
1570 " \t{$src3, $src2, $dst {${mask}}|$dst {${mask}}, $src2, $src3}"),
1571 [], IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG, EVEX_K;
1572 def rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
1573 !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
1574 [(set RC:$dst, (mem_pat addr:$src))], IIC_SSE_MOV_S_RM>,
1576 def mr: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
1577 !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
1578 [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
1580 } //hasSideEffects = 0
// Instantiations: VMOVSSZ* (f32, XS prefix) and VMOVSDZ* (f64, XD + VEX_W).
1583 let ExeDomain = SSEPackedSingle in
1584 defm VMOVSSZ : avx512_move_scalar<"movss", FR32X, X86Movss, v4f32, f32mem,
1585 loadf32>, XS, EVEX_CD8<32, CD8VT1>;
1587 let ExeDomain = SSEPackedDouble in
1588 defm VMOVSDZ : avx512_move_scalar<"movsd", FR64X, X86Movsd, v2f64, f64mem,
1589 loadf64>, XD, VEX_W, EVEX_CD8<64, CD8VT1>;
// Lower a scalar X86select on a 1-bit mask to the k-masked VMOVS[SD]Zrrk,
// shuttling the FP scalars through VR128X with COPY_TO_REGCLASS.
1591 def : Pat<(f32 (X86select VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
1592 (COPY_TO_REGCLASS (VMOVSSZrrk (COPY_TO_REGCLASS FR32X:$src2, VR128X),
1593 VK1WM:$mask, (f32 (IMPLICIT_DEF)), FR32X:$src1), FR32X)>;
1595 def : Pat<(f64 (X86select VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
1596 (COPY_TO_REGCLASS (VMOVSDZrrk (COPY_TO_REGCLASS FR64X:$src2, VR128X),
1597 VK1WM:$mask, (f64 (IMPLICIT_DEF)), FR64X:$src1), FR64X)>;
// Reverse-operand (store-form opcode 0x11) register-register encodings.
// These exist only so the disassembler can print them; they carry no
// patterns and no side effects.
1599 // For the disassembler
1600 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
1601 def VMOVSSZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
1602 (ins VR128X:$src1, FR32X:$src2),
1603 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
1605 XS, EVEX_4V, VEX_LIG;
1606 def VMOVSDZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
1607 (ins VR128X:$src1, FR64X:$src2),
1608 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
1610 XD, EVEX_4V, VEX_LIG, VEX_W;
// ISel patterns lowering zero-extending scalar moves, scalar loads/stores
// and low-element shuffles onto the AVX-512 VMOVSS/VMOVSD instructions.
// NOTE(review): several closing braces and SUBREG_TO_REG tails are missing
// from this extraction (numbering gaps at 1625, 1635-1636, 1646, 1659,
// 1661, 1683, 1689, 1693, 1696, 1699, 1701, 1709-1710, 1715, 1720-1721,
// 1731-1732, 1737, 1742-1743, 1752-1753) — verify against upstream.
1613 let Predicates = [HasAVX512] in {
1614 let AddedComplexity = 15 in {
1615 // Move scalar to XMM zero-extended, zeroing a VR128X then do a
1616 // MOVS{S,D} to the lower bits.
1617 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32X:$src)))),
1618 (VMOVSSZrr (v4f32 (V_SET0)), FR32X:$src)>;
1619 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128X:$src))),
1620 (VMOVSSZrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
1621 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128X:$src))),
1622 (VMOVSSZrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
1623 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64X:$src)))),
1624 (VMOVSDZrr (v2f64 (V_SET0)), FR64X:$src)>;
1626 // Move low f32 and clear high bits.
1627 def : Pat<(v8f32 (X86vzmovl (v8f32 VR256X:$src))),
1628 (SUBREG_TO_REG (i32 0),
1629 (VMOVSSZrr (v4f32 (V_SET0)),
1630 (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)), sub_xmm)>;
1631 def : Pat<(v8i32 (X86vzmovl (v8i32 VR256X:$src))),
1632 (SUBREG_TO_REG (i32 0),
1633 (VMOVSSZrr (v4i32 (V_SET0)),
1634 (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)), sub_xmm)>;
1637 let AddedComplexity = 20 in {
1638 // MOVSSrm zeros the high parts of the register; represent this
1639 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
1640 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
1641 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
1642 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
1643 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
1644 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
1645 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
1647 // MOVSDrm zeros the high parts of the register; represent this
1648 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
1649 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
1650 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
1651 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
1652 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
1653 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
1654 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
1655 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
1656 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
1657 def : Pat<(v2f64 (X86vzload addr:$src)),
1658 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
// 256-bit zero-extended forms: a 128-bit load/insert whose upper lanes are
// known zero, expressed as insert_subvector into undef at index 0.
1660 // Represent the same patterns above but in the form they appear for
1662 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
1663 (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
1664 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
1665 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
1666 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
1667 (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
1668 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
1669 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
1670 (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
1672 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
1673 (v4f32 (scalar_to_vector FR32X:$src)), (iPTR 0)))),
1674 (SUBREG_TO_REG (i32 0), (v4f32 (VMOVSSZrr (v4f32 (V_SET0)),
1675 FR32X:$src)), sub_xmm)>;
1676 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
1677 (v2f64 (scalar_to_vector FR64X:$src)), (iPTR 0)))),
1678 (SUBREG_TO_REG (i64 0), (v2f64 (VMOVSDZrr (v2f64 (V_SET0)),
1679 FR64X:$src)), sub_xmm)>;
1680 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
1681 (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
1682 (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;
1684 // Move low f64 and clear high bits.
1685 def : Pat<(v4f64 (X86vzmovl (v4f64 VR256X:$src))),
1686 (SUBREG_TO_REG (i32 0),
1687 (VMOVSDZrr (v2f64 (V_SET0)),
1688 (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)), sub_xmm)>;
1690 def : Pat<(v4i64 (X86vzmovl (v4i64 VR256X:$src))),
1691 (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (V_SET0)),
1692 (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)), sub_xmm)>;
1694 // Extract and store.
1695 def : Pat<(store (f32 (vector_extract (v4f32 VR128X:$src), (iPTR 0))),
1697 (VMOVSSZmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128X:$src), FR32X))>;
1698 def : Pat<(store (f64 (vector_extract (v2f64 VR128X:$src), (iPTR 0))),
1700 (VMOVSDZmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128X:$src), FR64X))>;
1702 // Shuffle with VMOVSS
1703 def : Pat<(v4i32 (X86Movss VR128X:$src1, VR128X:$src2)),
1704 (VMOVSSZrr (v4i32 VR128X:$src1),
1705 (COPY_TO_REGCLASS (v4i32 VR128X:$src2), FR32X))>;
1706 def : Pat<(v4f32 (X86Movss VR128X:$src1, VR128X:$src2)),
1707 (VMOVSSZrr (v4f32 VR128X:$src1),
1708 (COPY_TO_REGCLASS (v4f32 VR128X:$src2), FR32X))>;
// 256-bit Movss/Movsd: operate on the low xmm halves, then re-insert.
1711 def : Pat<(v8i32 (X86Movss VR256X:$src1, VR256X:$src2)),
1712 (SUBREG_TO_REG (i32 0),
1713 (VMOVSSZrr (EXTRACT_SUBREG (v8i32 VR256X:$src1), sub_xmm),
1714 (EXTRACT_SUBREG (v8i32 VR256X:$src2), sub_xmm)),
1716 def : Pat<(v8f32 (X86Movss VR256X:$src1, VR256X:$src2)),
1717 (SUBREG_TO_REG (i32 0),
1718 (VMOVSSZrr (EXTRACT_SUBREG (v8f32 VR256X:$src1), sub_xmm),
1719 (EXTRACT_SUBREG (v8f32 VR256X:$src2), sub_xmm)),
1722 // Shuffle with VMOVSD
1723 def : Pat<(v2i64 (X86Movsd VR128X:$src1, VR128X:$src2)),
1724 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
1725 def : Pat<(v2f64 (X86Movsd VR128X:$src1, VR128X:$src2)),
1726 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
1727 def : Pat<(v4f32 (X86Movsd VR128X:$src1, VR128X:$src2)),
1728 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
1729 def : Pat<(v4i32 (X86Movsd VR128X:$src1, VR128X:$src2)),
1730 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
1733 def : Pat<(v4i64 (X86Movsd VR256X:$src1, VR256X:$src2)),
1734 (SUBREG_TO_REG (i32 0),
1735 (VMOVSDZrr (EXTRACT_SUBREG (v4i64 VR256X:$src1), sub_xmm),
1736 (EXTRACT_SUBREG (v4i64 VR256X:$src2), sub_xmm)),
1738 def : Pat<(v4f64 (X86Movsd VR256X:$src1, VR256X:$src2)),
1739 (SUBREG_TO_REG (i32 0),
1740 (VMOVSDZrr (EXTRACT_SUBREG (v4f64 VR256X:$src1), sub_xmm),
1741 (EXTRACT_SUBREG (v4f64 VR256X:$src2), sub_xmm)),
// Movlpd/Movlps (replace low 64 bits) lower to the same VMOVSDZrr.
1744 def : Pat<(v2f64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
1745 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
1746 def : Pat<(v2i64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
1747 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
1748 def : Pat<(v4f32 (X86Movlps VR128X:$src1, VR128X:$src2)),
1749 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
1750 def : Pat<(v4i32 (X86Movlps VR128X:$src1, VR128X:$src2)),
1751 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
// VMOVZPQILo2PQI: move low qword, zero the high qword (vmovq xmm, xmm/m64).
1754 let AddedComplexity = 15 in
1755 def VMOVZPQILo2PQIZrr : AVX512XSI<0x7E, MRMSrcReg, (outs VR128X:$dst),
1757 "vmovq\t{$src, $dst|$dst, $src}",
1758 [(set VR128X:$dst, (v2i64 (X86vzmovl
1759 (v2i64 VR128X:$src))))],
1760 IIC_SSE_MOVQ_RR>, EVEX, VEX_W;
1762 let AddedComplexity = 20 in
1763 def VMOVZPQILo2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
1765 "vmovq\t{$src, $dst|$dst, $src}",
1766 [(set VR128X:$dst, (v2i64 (X86vzmovl
1767 (loadv2i64 addr:$src))))],
1768 IIC_SSE_MOVDQ>, EVEX, VEX_W,
// NOTE(review): EVEX_CD8<8, CD8VT8> on a 64-bit qword load looks
// inconsistent with the CD8VT1/64-bit tuple used by the other vmovq
// forms above — confirm against the upstream file / Intel SDM.
1769 EVEX_CD8<8, CD8VT8>;
// Patterns selecting the 128-bit movd/movq instructions for zero-extending
// scalar-to-vector operations; the hardware zeros the upper lanes, so
// SUBREG_TO_REG widens the 128-bit result to 256/512 bits for free.
1771 let Predicates = [HasAVX512] in {
1772 // AVX 128-bit movd/movq instruction write zeros in the high 128-bit part.
1773 let AddedComplexity = 20 in {
1774 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
1775 (VMOVDI2PDIZrm addr:$src)>;
1776 def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
1777 (VMOV64toPQIZrr GR64:$src)>;
1778 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
1779 (VMOVDI2PDIZrr GR32:$src)>;
1781 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
1782 (VMOVDI2PDIZrm addr:$src)>;
1783 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
1784 (VMOVDI2PDIZrm addr:$src)>;
1785 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
1786 (VMOVZPQILo2PQIZrm addr:$src)>;
1787 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128X:$src))),
1788 (VMOVZPQILo2PQIZrr VR128X:$src)>;
1789 def : Pat<(v2i64 (X86vzload addr:$src)),
1790 (VMOVZPQILo2PQIZrm addr:$src)>;
1793 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
1794 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
1795 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
1796 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>;
1797 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
1798 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
1799 (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
// X86Vinsert of a GPR into element 0 of a zero (or undef) 512-bit vector:
// a plain 128-bit movd/movq already produces the required value.
1802 def : Pat<(v16i32 (X86Vinsert (v16i32 immAllZerosV), GR32:$src2, (iPTR 0))),
1803 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;
1805 def : Pat<(v8i64 (X86Vinsert (bc_v8i64 (v16i32 immAllZerosV)), GR64:$src2, (iPTR 0))),
1806 (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
1808 def : Pat<(v16i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
1809 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;
1811 def : Pat<(v8i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
1812 (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
1814 //===----------------------------------------------------------------------===//
1815 // AVX-512 - Non-temporals
1816 //===----------------------------------------------------------------------===//
// Non-temporal 512-bit load: vmovntdqa, matched via the movntdqa intrinsic.
1818 def VMOVNTDQAZrm : AVX5128I<0x2A, MRMSrcMem, (outs VR512:$dst),
1820 "vmovntdqa\t{$src, $dst|$dst, $src}",
1822 (int_x86_avx512_movntdqa addr:$src))]>,
1823 EVEX, EVEX_V512, EVEX_CD8<64, CD8VF>;
// Non-temporal 512-bit stores; alignednontemporalstore requires the
// natural 64-byte alignment.  NOTE(review): the addr:$dst operands of the
// store patterns (original lines 1832/1840/1849) are missing from this
// extraction.
1825 // Prefer non-temporal over temporal versions
1826 let AddedComplexity = 400, SchedRW = [WriteStore] in {
1828 def VMOVNTPSZmr : AVX512PSI<0x2B, MRMDestMem, (outs),
1829 (ins f512mem:$dst, VR512:$src),
1830 "vmovntps\t{$src, $dst|$dst, $src}",
1831 [(alignednontemporalstore (v16f32 VR512:$src),
1834 EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
1836 def VMOVNTPDZmr : AVX512PDI<0x2B, MRMDestMem, (outs),
1837 (ins f512mem:$dst, VR512:$src),
1838 "vmovntpd\t{$src, $dst|$dst, $src}",
1839 [(alignednontemporalstore (v8f64 VR512:$src),
1842 EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
1845 def VMOVNTDQZmr : AVX512BI<0xE7, MRMDestMem, (outs),
1846 (ins i512mem:$dst, VR512:$src),
1847 "vmovntdq\t{$src, $dst|$dst, $src}",
1848 [(alignednontemporalstore (v8i64 VR512:$src),
1851 EVEX, EVEX_V512, EVEX_CD8<64, CD8VF>;
1854 //===----------------------------------------------------------------------===//
1855 // AVX-512 - Integer arithmetic
// Skeleton for 512-bit integer binary ops with the full EVEX form matrix:
//   rr / rrk / rrkz    - reg-reg: plain, merge-masked, zero-masked
//   rm / rmk / rmkz    - reg-mem: plain, merge-masked, zero-masked
//   rmb / rmbk / rmbkz - reg-broadcast-mem variants (EVEX_B, "{1toN}")
// KRC is the mask register class; scalar_mfrag/x86scalar_mop describe the
// broadcast element; IsCommutable feeds isCommutable on the rr form.
// NOTE(review): itins operands and closing braces are missing at the
// numbering gaps (1868, 1877, 1887-1888, 1894, 1903, 1913, 1927, 1931,
// 1937, 1943-1945) in this extraction — confirm upstream before editing.
1857 multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
1858 ValueType OpVT, RegisterClass KRC,
1859 RegisterClass RC, PatFrag memop_frag,
1860 X86MemOperand x86memop, PatFrag scalar_mfrag,
1861 X86MemOperand x86scalar_mop, string BrdcstStr,
1862 OpndItins itins, bit IsCommutable = 0> {
1863 let isCommutable = IsCommutable in
1864 def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
1865 (ins RC:$src1, RC:$src2),
1866 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1867 [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
1869 let AddedComplexity = 30 in {
1870 let Constraints = "$src0 = $dst" in
1871 def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
1872 (ins RC:$src0, KRC:$mask, RC:$src1, RC:$src2),
1873 !strconcat(OpcodeStr,
1874 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
1875 [(set RC:$dst, (OpVT (vselect KRC:$mask,
1876 (OpNode (OpVT RC:$src1), (OpVT RC:$src2)),
1878 itins.rr>, EVEX_4V, EVEX_K;
1879 def rrkz : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
1880 (ins KRC:$mask, RC:$src1, RC:$src2),
1881 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst {${mask}} {z}" ,
1882 "|$dst {${mask}} {z}, $src1, $src2}"),
1883 [(set RC:$dst, (OpVT (vselect KRC:$mask,
1884 (OpNode (OpVT RC:$src1), (OpVT RC:$src2)),
1885 (OpVT immAllZerosV))))],
1886 itins.rr>, EVEX_4V, EVEX_KZ;
1889 let mayLoad = 1 in {
1890 def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1891 (ins RC:$src1, x86memop:$src2),
1892 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1893 [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (memop_frag addr:$src2))))],
1895 let AddedComplexity = 30 in {
1896 let Constraints = "$src0 = $dst" in
1897 def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1898 (ins RC:$src0, KRC:$mask, RC:$src1, x86memop:$src2),
1899 !strconcat(OpcodeStr,
1900 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
1901 [(set RC:$dst, (OpVT (vselect KRC:$mask,
1902 (OpNode (OpVT RC:$src1), (memop_frag addr:$src2)),
1904 itins.rm>, EVEX_4V, EVEX_K;
1905 def rmkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1906 (ins KRC:$mask, RC:$src1, x86memop:$src2),
1907 !strconcat(OpcodeStr,
1908 " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
1909 [(set RC:$dst, (OpVT (vselect KRC:$mask,
1910 (OpNode (OpVT RC:$src1), (memop_frag addr:$src2)),
1911 (OpVT immAllZerosV))))],
1912 itins.rm>, EVEX_4V, EVEX_KZ;
1914 def rmb : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1915 (ins RC:$src1, x86scalar_mop:$src2),
1916 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
1917 ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
1918 [(set RC:$dst, (OpNode RC:$src1,
1919 (OpVT (X86VBroadcast (scalar_mfrag addr:$src2)))))],
1920 itins.rm>, EVEX_4V, EVEX_B;
1921 let AddedComplexity = 30 in {
1922 let Constraints = "$src0 = $dst" in
1923 def rmbk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1924 (ins RC:$src0, KRC:$mask, RC:$src1, x86scalar_mop:$src2),
1925 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
1926 ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}",
1928 [(set RC:$dst, (OpVT (vselect KRC:$mask,
1929 (OpNode (OpVT RC:$src1),
1930 (OpVT (X86VBroadcast (scalar_mfrag addr:$src2)))),
1932 itins.rm>, EVEX_4V, EVEX_B, EVEX_K;
1933 def rmbkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1934 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
1935 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
1936 ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
1938 [(set RC:$dst, (OpVT (vselect KRC:$mask,
1939 (OpNode (OpVT RC:$src1),
1940 (OpVT (X86VBroadcast (scalar_mfrag addr:$src2)))),
1941 (OpVT immAllZerosV))))],
1942 itins.rm>, EVEX_4V, EVEX_B, EVEX_KZ;
// Variant of avx512_binop_rm for ops whose destination and source element
// types differ (e.g. vpmuldq: v16i32 sources, v8i64 result).  Same form
// matrix (rr/rrk/rrkz, rm/rmk/rmkz, rmb/rmbk/rmbkz) but all patterns are
// intentionally empty; selection is done via explicit Pat<> records and
// intrinsic patterns after the defm instantiations below.
1947 multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, ValueType DstVT,
1948 ValueType SrcVT, RegisterClass KRC, RegisterClass RC,
1949 PatFrag memop_frag, X86MemOperand x86memop,
1950 PatFrag scalar_mfrag, X86MemOperand x86scalar_mop,
1951 string BrdcstStr, OpndItins itins, bit IsCommutable = 0> {
1952 let isCommutable = IsCommutable in
1954 def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
1955 (ins RC:$src1, RC:$src2),
1956 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1958 def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
1959 (ins KRC:$mask, RC:$src1, RC:$src2),
1960 !strconcat(OpcodeStr,
1961 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
1962 [], itins.rr>, EVEX_4V, EVEX_K;
1963 def rrkz : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
1964 (ins KRC:$mask, RC:$src1, RC:$src2),
1965 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst {${mask}} {z}" ,
1966 "|$dst {${mask}} {z}, $src1, $src2}"),
1967 [], itins.rr>, EVEX_4V, EVEX_KZ;
1969 let mayLoad = 1 in {
1970 def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1971 (ins RC:$src1, x86memop:$src2),
1972 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1974 def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1975 (ins KRC:$mask, RC:$src1, x86memop:$src2),
1976 !strconcat(OpcodeStr,
1977 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
1978 [], itins.rm>, EVEX_4V, EVEX_K;
1979 def rmkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1980 (ins KRC:$mask, RC:$src1, x86memop:$src2),
1981 !strconcat(OpcodeStr,
1982 " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
1983 [], itins.rm>, EVEX_4V, EVEX_KZ;
1984 def rmb : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1985 (ins RC:$src1, x86scalar_mop:$src2),
1986 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
1987 ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
1988 [], itins.rm>, EVEX_4V, EVEX_B;
1989 def rmbk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1990 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
1991 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
1992 ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}",
1994 [], itins.rm>, EVEX_4V, EVEX_B, EVEX_K;
1995 def rmbkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1996 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
1997 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
1998 ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
2000 [], itins.rm>, EVEX_4V, EVEX_B, EVEX_KZ;
// 512-bit integer arithmetic instantiations.  Dword forms use VK16WM masks
// and "{1to16}" broadcasts; qword forms use VK8WM and "{1to8}" with VEX_W.
2004 defm VPADDDZ : avx512_binop_rm<0xFE, "vpaddd", add, v16i32, VK16WM, VR512,
2005 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2006 SSE_INTALU_ITINS_P, 1>, EVEX_V512, EVEX_CD8<32, CD8VF>;
2008 defm VPSUBDZ : avx512_binop_rm<0xFA, "vpsubd", sub, v16i32, VK16WM, VR512,
2009 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2010 SSE_INTALU_ITINS_P, 0>, EVEX_V512, EVEX_CD8<32, CD8VF>;
2012 defm VPMULLDZ : avx512_binop_rm<0x40, "vpmulld", mul, v16i32, VK16WM, VR512,
2013 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2014 SSE_INTALU_ITINS_P, 1>, T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2016 defm VPADDQZ : avx512_binop_rm<0xD4, "vpaddq", add, v8i64, VK8WM, VR512,
2017 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2018 SSE_INTALU_ITINS_P, 1>, EVEX_CD8<64, CD8VF>, EVEX_V512, VEX_W;
2020 defm VPSUBQZ : avx512_binop_rm<0xFB, "vpsubq", sub, v8i64, VK8WM, VR512,
2021 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2022 SSE_INTALU_ITINS_P, 0>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// Widening multiplies (32x32 -> 64): patternless binop_rm2 instantiations,
// wired up by the explicit Pat<> records that follow.
2024 defm VPMULDQZ : avx512_binop_rm2<0x28, "vpmuldq", v8i64, v16i32, VK8WM, VR512,
2025 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2026 SSE_INTALU_ITINS_P, 1>, T8PD, EVEX_V512,
2027 EVEX_CD8<64, CD8VF>, VEX_W;
2029 defm VPMULUDQZ : avx512_binop_rm2<0xF4, "vpmuludq", v8i64, v16i32, VK8WM, VR512,
2030 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2031 SSE_INTMUL_ITINS_P, 1>, EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
2033 def : Pat<(v8i64 (X86pmuludq (v16i32 VR512:$src1), (v16i32 VR512:$src2))),
2034 (VPMULUDQZrr VR512:$src1, VR512:$src2)>;
// Unmasked intrinsic calls (all-ones mask, zero pass-through) select the
// plain rr form.
2036 def : Pat<(v8i64 (int_x86_avx512_mask_pmulu_dq_512 (v16i32 VR512:$src1),
2037 (v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2038 (VPMULUDQZrr VR512:$src1, VR512:$src2)>;
2039 def : Pat<(v8i64 (int_x86_avx512_mask_pmul_dq_512 (v16i32 VR512:$src1),
2040 (v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2041 (VPMULDQZrr VR512:$src1, VR512:$src2)>;
// Signed/unsigned min/max, dword and qword element widths.
2043 defm VPMAXUDZ : avx512_binop_rm<0x3F, "vpmaxud", X86umax, v16i32, VK16WM, VR512,
2044 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2045 SSE_INTALU_ITINS_P, 1>,
2046 T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2047 defm VPMAXUQZ : avx512_binop_rm<0x3F, "vpmaxuq", X86umax, v8i64, VK8WM, VR512,
2048 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2049 SSE_INTALU_ITINS_P, 0>,
2050 T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2052 defm VPMAXSDZ : avx512_binop_rm<0x3D, "vpmaxsd", X86smax, v16i32, VK16WM, VR512,
2053 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2054 SSE_INTALU_ITINS_P, 1>,
2055 T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2056 defm VPMAXSQZ : avx512_binop_rm<0x3D, "vpmaxsq", X86smax, v8i64, VK8WM, VR512,
2057 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2058 SSE_INTALU_ITINS_P, 0>,
2059 T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2061 defm VPMINUDZ : avx512_binop_rm<0x3B, "vpminud", X86umin, v16i32, VK16WM, VR512,
2062 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2063 SSE_INTALU_ITINS_P, 1>,
2064 T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2065 defm VPMINUQZ : avx512_binop_rm<0x3B, "vpminuq", X86umin, v8i64, VK8WM, VR512,
2066 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2067 SSE_INTALU_ITINS_P, 0>,
2068 T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2070 defm VPMINSDZ : avx512_binop_rm<0x39, "vpminsd", X86smin, v16i32, VK16WM, VR512,
2071 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2072 SSE_INTALU_ITINS_P, 1>,
2073 T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2074 defm VPMINSQZ : avx512_binop_rm<0x39, "vpminsq", X86smin, v8i64, VK8WM, VR512,
2075 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2076 SSE_INTALU_ITINS_P, 0>,
2077 T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// Unmasked min/max intrinsics (all-ones mask, zero pass-through) map to
// the plain rr instructions.
2079 def : Pat <(v16i32 (int_x86_avx512_mask_pmaxs_d_512 (v16i32 VR512:$src1),
2080 (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
2081 (VPMAXSDZrr VR512:$src1, VR512:$src2)>;
2082 def : Pat <(v16i32 (int_x86_avx512_mask_pmaxu_d_512 (v16i32 VR512:$src1),
2083 (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
2084 (VPMAXUDZrr VR512:$src1, VR512:$src2)>;
2085 def : Pat <(v8i64 (int_x86_avx512_mask_pmaxs_q_512 (v8i64 VR512:$src1),
2086 (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2087 (VPMAXSQZrr VR512:$src1, VR512:$src2)>;
2088 def : Pat <(v8i64 (int_x86_avx512_mask_pmaxu_q_512 (v8i64 VR512:$src1),
2089 (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2090 (VPMAXUQZrr VR512:$src1, VR512:$src2)>;
2091 def : Pat <(v16i32 (int_x86_avx512_mask_pmins_d_512 (v16i32 VR512:$src1),
2092 (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
2093 (VPMINSDZrr VR512:$src1, VR512:$src2)>;
2094 def : Pat <(v16i32 (int_x86_avx512_mask_pminu_d_512 (v16i32 VR512:$src1),
2095 (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
2096 (VPMINUDZrr VR512:$src1, VR512:$src2)>;
2097 def : Pat <(v8i64 (int_x86_avx512_mask_pmins_q_512 (v8i64 VR512:$src1),
2098 (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2099 (VPMINSQZrr VR512:$src1, VR512:$src2)>;
2100 def : Pat <(v8i64 (int_x86_avx512_mask_pminu_q_512 (v8i64 VR512:$src1),
2101 (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2102 (VPMINUQZrr VR512:$src1, VR512:$src2)>;
2103 //===----------------------------------------------------------------------===//
2104 // AVX-512 - Unpack Instructions
2105 //===----------------------------------------------------------------------===//
// FP unpack skeleton: rr and rm forms; the rm form bitconverts the loaded
// memory fragment to the operation's vector type.
2107 multiclass avx512_unpack_fp<bits<8> opc, SDNode OpNode, ValueType vt,
2108 PatFrag mem_frag, RegisterClass RC,
2109 X86MemOperand x86memop, string asm,
2111 def rr : AVX512PI<opc, MRMSrcReg,
2112 (outs RC:$dst), (ins RC:$src1, RC:$src2),
2114 (vt (OpNode RC:$src1, RC:$src2)))],
2116 def rm : AVX512PI<opc, MRMSrcMem,
2117 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2119 (vt (OpNode RC:$src1,
2120 (bitconvert (mem_frag addr:$src2)))))],
// NOTE(review): the single-precision instantiations below pass memopv8f64
// for a v16f32 operation (relying on the bitconvert in the rm pattern);
// verify this is intentional and not a copy-paste of the PD fragment.
2124 defm VUNPCKHPSZ: avx512_unpack_fp<0x15, X86Unpckh, v16f32, memopv8f64,
2125 VR512, f512mem, "vunpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2126 SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
2127 defm VUNPCKHPDZ: avx512_unpack_fp<0x15, X86Unpckh, v8f64, memopv8f64,
2128 VR512, f512mem, "vunpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2129 SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2130 defm VUNPCKLPSZ: avx512_unpack_fp<0x14, X86Unpckl, v16f32, memopv8f64,
2131 VR512, f512mem, "vunpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2132 SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
2133 defm VUNPCKLPDZ: avx512_unpack_fp<0x14, X86Unpckl, v8f64, memopv8f64,
2134 VR512, f512mem, "vunpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2135 SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// Integer unpack skeleton: same rr/rm shape with explicit patterns.
2137 multiclass avx512_unpack_int<bits<8> opc, string OpcodeStr, SDNode OpNode,
2138 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
2139 X86MemOperand x86memop> {
2140 def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2141 (ins RC:$src1, RC:$src2),
2142 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2143 [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
2144 IIC_SSE_UNPCK>, EVEX_4V;
2145 def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2146 (ins RC:$src1, x86memop:$src2),
2147 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2148 [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1),
2149 (bitconvert (memop_frag addr:$src2)))))],
2150 IIC_SSE_UNPCK>, EVEX_4V;
2152 defm VPUNPCKLDQZ : avx512_unpack_int<0x62, "vpunpckldq", X86Unpckl, v16i32,
2153 VR512, memopv16i32, i512mem>, EVEX_V512,
2154 EVEX_CD8<32, CD8VF>;
2155 defm VPUNPCKLQDQZ : avx512_unpack_int<0x6C, "vpunpcklqdq", X86Unpckl, v8i64,
2156 VR512, memopv8i64, i512mem>, EVEX_V512,
2157 VEX_W, EVEX_CD8<64, CD8VF>;
2158 defm VPUNPCKHDQZ : avx512_unpack_int<0x6A, "vpunpckhdq", X86Unpckh, v16i32,
2159 VR512, memopv16i32, i512mem>, EVEX_V512,
2160 EVEX_CD8<32, CD8VF>;
2161 defm VPUNPCKHQDQZ : avx512_unpack_int<0x6D, "vpunpckhqdq", X86Unpckh, v8i64,
2162 VR512, memopv8i64, i512mem>, EVEX_V512,
2163 VEX_W, EVEX_CD8<64, CD8VF>;
2164 //===----------------------------------------------------------------------===//
// Immediate-controlled shuffle skeleton: ri (reg, imm8) and mi (mem, imm8).
2168 multiclass avx512_pshuf_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
2169 SDNode OpNode, PatFrag mem_frag,
2170 X86MemOperand x86memop, ValueType OpVT> {
2171 def ri : AVX512Ii8<opc, MRMSrcReg, (outs RC:$dst),
2172 (ins RC:$src1, i8imm:$src2),
2173 !strconcat(OpcodeStr,
2174 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2176 (OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
2178 def mi : AVX512Ii8<opc, MRMSrcMem, (outs RC:$dst),
2179 (ins x86memop:$src1, i8imm:$src2),
2180 !strconcat(OpcodeStr,
2181 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2183 (OpVT (OpNode (mem_frag addr:$src1),
2184 (i8 imm:$src2))))]>, EVEX;
2187 defm VPSHUFDZ : avx512_pshuf_imm<0x70, "vpshufd", VR512, X86PShufd, memopv16i32,
2188 i512mem, v16i32>, PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2190 let ExeDomain = SSEPackedSingle in
2191 defm VPERMILPSZ : avx512_pshuf_imm<0x04, "vpermilps", VR512, X86VPermilp,
2192 memopv16f32, i512mem, v16f32>, TAPD, EVEX_V512,
2193 EVEX_CD8<32, CD8VF>;
2194 let ExeDomain = SSEPackedDouble in
2195 defm VPERMILPDZ : avx512_pshuf_imm<0x05, "vpermilpd", VR512, X86VPermilp,
// NOTE(review): the PD (64-bit element) form uses EVEX_CD8<32, ...>; the
// sibling 64-bit-element defs in this file use EVEX_CD8<64, ...> — verify
// the disp8 compression granularity against upstream / the Intel SDM.
2196 memopv8f64, i512mem, v8f64>, TAPD, EVEX_V512,
2197 VEX_W, EVEX_CD8<32, CD8VF>;
// Integer vectors reuse the FP permute instructions (same bit movement).
2199 def : Pat<(v16i32 (X86VPermilp VR512:$src1, (i8 imm:$imm))),
2200 (VPERMILPSZri VR512:$src1, imm:$imm)>;
2201 def : Pat<(v8i64 (X86VPermilp VR512:$src1, (i8 imm:$imm))),
2202 (VPERMILPDZri VR512:$src1, imm:$imm)>;
2204 //===----------------------------------------------------------------------===//
2205 // AVX-512 Logical Instructions
2206 //===----------------------------------------------------------------------===//
// Bitwise and/or/xor/andn in both dword (VK16WM, {1to16}) and qword
// (VK8WM, {1to8}, VEX_W) element widths; the element width only matters
// for masking and broadcast granularity.
2208 defm VPANDDZ : avx512_binop_rm<0xDB, "vpandd", and, v16i32, VK16WM, VR512, memopv16i32,
2209 i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
2210 EVEX_V512, EVEX_CD8<32, CD8VF>;
2211 defm VPANDQZ : avx512_binop_rm<0xDB, "vpandq", and, v8i64, VK8WM, VR512, memopv8i64,
2212 i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
2213 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2214 defm VPORDZ : avx512_binop_rm<0xEB, "vpord", or, v16i32, VK16WM, VR512, memopv16i32,
2215 i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
2216 EVEX_V512, EVEX_CD8<32, CD8VF>;
2217 defm VPORQZ : avx512_binop_rm<0xEB, "vporq", or, v8i64, VK8WM, VR512, memopv8i64,
2218 i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
2219 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2220 defm VPXORDZ : avx512_binop_rm<0xEF, "vpxord", xor, v16i32, VK16WM, VR512, memopv16i32,
2221 i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
2222 EVEX_V512, EVEX_CD8<32, CD8VF>;
2223 defm VPXORQZ : avx512_binop_rm<0xEF, "vpxorq", xor, v8i64, VK8WM, VR512, memopv8i64,
2224 i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
2225 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// andn is not commutative, hence IsCommutable = 0.
2226 defm VPANDNDZ : avx512_binop_rm<0xDF, "vpandnd", X86andnp, v16i32, VK16WM, VR512,
2227 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2228 SSE_BIT_ITINS_P, 0>, EVEX_V512, EVEX_CD8<32, CD8VF>;
2229 defm VPANDNQZ : avx512_binop_rm<0xDF, "vpandnq", X86andnp, v8i64, VK8WM, VR512,
2230 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2231 SSE_BIT_ITINS_P, 0>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2233 //===----------------------------------------------------------------------===//
2234 // AVX-512 FP arithmetic
2235 //===----------------------------------------------------------------------===//
// Scalar FP binops (ss/sd): wraps sse12_fp_scalar to emit AVX-512
// (EVEX-encoded, FR32X/FR64X) variants of add/mul/min/max/sub/div.
// NOTE(review): the extraction of this chunk dropped some lines here
// (e.g. the itins parameter line and closing braces are not visible);
// text below is kept byte-identical to what is present.
2237 multiclass avx512_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
2239 defm SSZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"), OpNode, FR32X,
2240 f32mem, itins.s, 0>, XS, EVEX_4V, VEX_LIG,
2241 EVEX_CD8<32, CD8VT1>;
2242 defm SDZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"), OpNode, FR64X,
2243 f64mem, itins.d, 0>, XD, VEX_W, EVEX_4V, VEX_LIG,
2244 EVEX_CD8<64, CD8VT1>;
// Commutable scalar ops: add, mul, min, max.
2247 let isCommutable = 1 in {
2248 defm VADD : avx512_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>;
2249 defm VMUL : avx512_binop_s<0x59, "mul", fmul, SSE_ALU_ITINS_S>;
2250 defm VMIN : avx512_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>;
2251 defm VMAX : avx512_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>;
// Non-commutable scalar ops: sub, div.
2253 let isCommutable = 0 in {
2254 defm VSUB : avx512_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>;
2255 defm VDIV : avx512_binop_s<0x5E, "div", fdiv, SSE_ALU_ITINS_S>;
// Packed FP binop multiclass.  Emits:
//   rr/rm    - unmasked reg-reg / reg-mem forms (carry ISel patterns)
//   rmb      - embedded-broadcast form (X86VBroadcast of a scalar load)
//   *k/*kz   - merge-masked / zero-masked forms ({k} / {k}{z} syntax);
//              these have empty pattern lists here.
2258 multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
2260 RegisterClass RC, ValueType vt,
2261 X86MemOperand x86memop, PatFrag mem_frag,
2262 X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
2264 Domain d, OpndItins itins, bit commutable> {
2265 let isCommutable = commutable in {
2266 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2267 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2268 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>,
// Merge-masking form: elements where the mask bit is clear keep $dst.
2271 def rrk: PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src1, RC:$src2),
2272 !strconcat(OpcodeStr,
2273 " \t{$src2, $src1, $dst {${mask}} |$dst {${mask}}, $src1, $src2}"),
2274 [], itins.rr, d>, EVEX_4V, EVEX_K;
// Zero-masking form: elements where the mask bit is clear become zero.
2276 def rrkz: PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src1, RC:$src2),
2277 !strconcat(OpcodeStr,
2278 " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
2279 [], itins.rr, d>, EVEX_4V, EVEX_KZ;
2282 let mayLoad = 1 in {
2283 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2284 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2285 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
2286 itins.rm, d>, EVEX_4V;
// Broadcast form: scalar memory operand replicated to all elements
// (BrdcstStr is the "{1toN}" assembly suffix).
2288 def rmb : PI<opc, MRMSrcMem, (outs RC:$dst),
2289 (ins RC:$src1, x86scalar_mop:$src2),
2290 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2291 ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
2292 [(set RC:$dst, (OpNode RC:$src1,
2293 (vt (X86VBroadcast (scalar_mfrag addr:$src2)))))],
2294 itins.rm, d>, EVEX_4V, EVEX_B;
2296 def rmk : PI<opc, MRMSrcMem, (outs RC:$dst),
2297 (ins KRC:$mask, RC:$src1, x86memop:$src2), !strconcat(OpcodeStr,
2298 "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2299 [], itins.rm, d>, EVEX_4V, EVEX_K;
2301 def rmkz : PI<opc, MRMSrcMem, (outs RC:$dst),
2302 (ins KRC:$mask, RC:$src1, x86memop:$src2), !strconcat(OpcodeStr,
2303 "\t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
2304 [], itins.rm, d>, EVEX_4V, EVEX_KZ;
2306 def rmbk : PI<opc, MRMSrcMem, (outs RC:$dst),
2307 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2), !strconcat(OpcodeStr,
2308 " \t{${src2}", BrdcstStr,
2309 ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}", BrdcstStr, "}"),
2310 [], itins.rm, d>, EVEX_4V, EVEX_B, EVEX_K;
2312 def rmbkz : PI<opc, MRMSrcMem, (outs RC:$dst),
2313 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2), !strconcat(OpcodeStr,
2314 " \t{${src2}", BrdcstStr,
2315 ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
2317 [], itins.rm, d>, EVEX_4V, EVEX_B, EVEX_KZ;
// 512-bit packed FP arithmetic instantiations.  PS forms: v16f32,
// {1to16} broadcast, EVEX_CD8<32, CD8VF>.  PD forms: v8f64, {1to8},
// VEX_W, EVEX_CD8<64, CD8VF>.  add/mul/min/max pass commutable=1;
// sub/div pass 0.
2321 defm VADDPSZ : avx512_fp_packed<0x58, "addps", fadd, VK16WM, VR512, v16f32, f512mem,
2322 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2323 SSE_ALU_ITINS_P.s, 1>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2325 defm VADDPDZ : avx512_fp_packed<0x58, "addpd", fadd, VK8WM, VR512, v8f64, f512mem,
2326 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2327 SSE_ALU_ITINS_P.d, 1>,
2328 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2330 defm VMULPSZ : avx512_fp_packed<0x59, "mulps", fmul, VK16WM, VR512, v16f32, f512mem,
2331 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2332 SSE_ALU_ITINS_P.s, 1>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2333 defm VMULPDZ : avx512_fp_packed<0x59, "mulpd", fmul, VK8WM, VR512, v8f64, f512mem,
2334 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2335 SSE_ALU_ITINS_P.d, 1>,
2336 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2338 defm VMINPSZ : avx512_fp_packed<0x5D, "minps", X86fmin, VK16WM, VR512, v16f32, f512mem,
2339 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2340 SSE_ALU_ITINS_P.s, 1>,
2341 EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2342 defm VMAXPSZ : avx512_fp_packed<0x5F, "maxps", X86fmax, VK16WM, VR512, v16f32, f512mem,
2343 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2344 SSE_ALU_ITINS_P.s, 1>,
2345 EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2347 defm VMINPDZ : avx512_fp_packed<0x5D, "minpd", X86fmin, VK8WM, VR512, v8f64, f512mem,
2348 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2349 SSE_ALU_ITINS_P.d, 1>,
2350 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2351 defm VMAXPDZ : avx512_fp_packed<0x5F, "maxpd", X86fmax, VK8WM, VR512, v8f64, f512mem,
2352 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2353 SSE_ALU_ITINS_P.d, 1>,
2354 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2356 defm VSUBPSZ : avx512_fp_packed<0x5C, "subps", fsub, VK16WM, VR512, v16f32, f512mem,
2357 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2358 SSE_ALU_ITINS_P.s, 0>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2359 defm VDIVPSZ : avx512_fp_packed<0x5E, "divps", fdiv, VK16WM, VR512, v16f32, f512mem,
2360 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2361 SSE_ALU_ITINS_P.s, 0>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2363 defm VSUBPDZ : avx512_fp_packed<0x5C, "subpd", fsub, VK8WM, VR512, v8f64, f512mem,
2364 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2365 SSE_ALU_ITINS_P.d, 0>,
2366 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2367 defm VDIVPDZ : avx512_fp_packed<0x5E, "divpd", fdiv, VK8WM, VR512, v8f64, f512mem,
2368 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2369 SSE_ALU_ITINS_P.d, 0>,
2370 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
// Map the masked max/min intrinsics, when called with an all-ones mask,
// an all-zero passthrough, and the current rounding mode, onto the plain
// unmasked rr instructions.  (immAllZerosV is canonically v16i32; the
// bc_v8f64 bitcast adapts it for the f64 patterns.)
2372 def : Pat<(v16f32 (int_x86_avx512_mask_max_ps_512 (v16f32 VR512:$src1),
2373 (v16f32 VR512:$src2), (bc_v16f32 (v16i32 immAllZerosV)),
2374 (i16 -1), FROUND_CURRENT)),
2375 (VMAXPSZrr VR512:$src1, VR512:$src2)>;
2377 def : Pat<(v8f64 (int_x86_avx512_mask_max_pd_512 (v8f64 VR512:$src1),
2378 (v8f64 VR512:$src2), (bc_v8f64 (v16i32 immAllZerosV)),
2379 (i8 -1), FROUND_CURRENT)),
2380 (VMAXPDZrr VR512:$src1, VR512:$src2)>;
2382 def : Pat<(v16f32 (int_x86_avx512_mask_min_ps_512 (v16f32 VR512:$src1),
2383 (v16f32 VR512:$src2), (bc_v16f32 (v16i32 immAllZerosV)),
2384 (i16 -1), FROUND_CURRENT)),
2385 (VMINPSZrr VR512:$src1, VR512:$src2)>;
2387 def : Pat<(v8f64 (int_x86_avx512_mask_min_pd_512 (v8f64 VR512:$src1),
2388 (v8f64 VR512:$src2), (bc_v8f64 (v16i32 immAllZerosV)),
2389 (i8 -1), FROUND_CURRENT)),
2390 (VMINPDZrr VR512:$src1, VR512:$src2)>;
2391 //===----------------------------------------------------------------------===//
2392 // AVX-512 VPTESTM instructions
2393 //===----------------------------------------------------------------------===//
// VPTESTM/VPTESTNM multiclass: AND (or AND-NOT) two vectors and set a
// mask-register bit per element that is nonzero (OpNode is
// X86testm/X86testnm).  Destination is a mask register class (KRC),
// not a vector register.
2395 multiclass avx512_vptest<bits<8> opc, string OpcodeStr, RegisterClass KRC,
2396 RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
2397 SDNode OpNode, ValueType vt> {
2398 def rr : AVX512PI<opc, MRMSrcReg,
2399 (outs KRC:$dst), (ins RC:$src1, RC:$src2),
2400 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2401 [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2)))],
2402 SSEPackedInt>, EVEX_4V;
2403 def rm : AVX512PI<opc, MRMSrcMem,
2404 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2),
2405 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2406 [(set KRC:$dst, (OpNode (vt RC:$src1),
2407 (bitconvert (memop_frag addr:$src2))))], SSEPackedInt>, EVEX_4V;
// VPTESTM d/q (AVX-512F) and VPTESTNM d/q (CDI-only) instantiations.
// NOTE(review): the memory operand class is f512mem even though these
// are integer tests -- confirm this is intentional (operand class only
// affects asm parsing/printing, not encoding).
2410 defm VPTESTMDZ : avx512_vptest<0x27, "vptestmd", VK16, VR512, f512mem,
2411 memopv16i32, X86testm, v16i32>, T8PD, EVEX_V512,
2412 EVEX_CD8<32, CD8VF>;
2413 defm VPTESTMQZ : avx512_vptest<0x27, "vptestmq", VK8, VR512, f512mem,
2414 memopv8i64, X86testm, v8i64>, T8PD, EVEX_V512, VEX_W,
2415 EVEX_CD8<64, CD8VF>;
2417 let Predicates = [HasCDI] in {
2418 defm VPTESTNMDZ : avx512_vptest<0x27, "vptestnmd", VK16, VR512, f512mem,
2419 memopv16i32, X86testnm, v16i32>, T8XS, EVEX_V512,
2420 EVEX_CD8<32, CD8VF>;
2421 defm VPTESTNMQZ : avx512_vptest<0x27, "vptestnmq", VK8, VR512, f512mem,
2422 memopv8i64, X86testnm, v8i64>, T8XS, EVEX_V512, VEX_W,
2423 EVEX_CD8<64, CD8VF>;
// Lower the ptestm intrinsics (all-ones mask) to the rr instruction and
// copy the resulting mask register into a GPR class for the i16/i8 result.
2426 def : Pat <(i16 (int_x86_avx512_mask_ptestm_d_512 (v16i32 VR512:$src1),
2427 (v16i32 VR512:$src2), (i16 -1))),
2428 (COPY_TO_REGCLASS (VPTESTMDZrr VR512:$src1, VR512:$src2), GR16)>;
2430 def : Pat <(i8 (int_x86_avx512_mask_ptestm_q_512 (v8i64 VR512:$src1),
2431 (v8i64 VR512:$src2), (i8 -1))),
2432 (COPY_TO_REGCLASS (VPTESTMQZrr VR512:$src1, VR512:$src2), GR8)>;
2433 //===----------------------------------------------------------------------===//
2434 // AVX-512 Shift instructions
2435 //===----------------------------------------------------------------------===//
// Shift-by-immediate multiclass: ri/mi carry ISel patterns; rik/mik are
// the merge-masked variants with empty pattern lists.  ImmFormR/ImmFormM
// select the ModRM reg-field encoding (e.g. MRM2r for /2).
2436 multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
2437 string OpcodeStr, SDNode OpNode, RegisterClass RC,
2438 ValueType vt, X86MemOperand x86memop, PatFrag mem_frag,
2439 RegisterClass KRC> {
2440 def ri : AVX512BIi8<opc, ImmFormR, (outs RC:$dst),
2441 (ins RC:$src1, i8imm:$src2),
2442 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2443 [(set RC:$dst, (vt (OpNode RC:$src1, (i8 imm:$src2))))],
2444 SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V;
2445 def rik : AVX512BIi8<opc, ImmFormR, (outs RC:$dst),
2446 (ins KRC:$mask, RC:$src1, i8imm:$src2),
2447 !strconcat(OpcodeStr,
2448 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2449 [], SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V, EVEX_K;
2450 def mi: AVX512BIi8<opc, ImmFormM, (outs RC:$dst),
2451 (ins x86memop:$src1, i8imm:$src2),
2452 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2453 [(set RC:$dst, (OpNode (mem_frag addr:$src1),
2454 (i8 imm:$src2)))], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V;
2455 def mik: AVX512BIi8<opc, ImmFormM, (outs RC:$dst),
2456 (ins KRC:$mask, x86memop:$src1, i8imm:$src2),
2457 !strconcat(OpcodeStr,
2458 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2459 [], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V, EVEX_K;
// Shift-by-xmm-count multiclass: the count operand ($src2) is always a
// 128-bit register or 128-bit memory load, regardless of the 512-bit
// destination width.  rrk/rmk are masked variants with empty patterns.
2462 multiclass avx512_shift_rrm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2463 RegisterClass RC, ValueType vt, ValueType SrcVT,
2464 PatFrag bc_frag, RegisterClass KRC> {
2465 // src2 is always 128-bit
2466 def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2467 (ins RC:$src1, VR128X:$src2),
2468 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2469 [(set RC:$dst, (vt (OpNode RC:$src1, (SrcVT VR128X:$src2))))],
2470 SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V;
2471 def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2472 (ins KRC:$mask, RC:$src1, VR128X:$src2),
2473 !strconcat(OpcodeStr,
2474 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2475 [], SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V, EVEX_K;
2476 def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2477 (ins RC:$src1, i128mem:$src2),
2478 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2479 [(set RC:$dst, (vt (OpNode RC:$src1,
2480 (bc_frag (memopv2i64 addr:$src2)))))],
2481 SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V;
2482 def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2483 (ins KRC:$mask, RC:$src1, i128mem:$src2),
2484 !strconcat(OpcodeStr,
2485 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2486 [], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V, EVEX_K;
// 512-bit shift instantiations.  Each logical shift is defined twice
// under the same defm name: once via avx512_shift_rmi (immediate count,
// suffixes ri/mi/rik/mik) and once via avx512_shift_rrm (xmm count,
// suffixes rr/rm/rrk/rmk); the generated record names differ by suffix,
// so the records do not collide.  The xmm-count forms use CD8VQ because
// the memory operand is 128 bits of a 512-bit vector's width.
2489 defm VPSRLDZ : avx512_shift_rmi<0x72, MRM2r, MRM2m, "vpsrld", X86vsrli,
2490 VR512, v16i32, i512mem, memopv16i32, VK16WM>,
2491 EVEX_V512, EVEX_CD8<32, CD8VF>;
2492 defm VPSRLDZ : avx512_shift_rrm<0xD2, "vpsrld", X86vsrl,
2493 VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
2494 EVEX_CD8<32, CD8VQ>;
2496 defm VPSRLQZ : avx512_shift_rmi<0x73, MRM2r, MRM2m, "vpsrlq", X86vsrli,
2497 VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
2498 EVEX_CD8<64, CD8VF>, VEX_W;
2499 defm VPSRLQZ : avx512_shift_rrm<0xD3, "vpsrlq", X86vsrl,
2500 VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
2501 EVEX_CD8<64, CD8VQ>, VEX_W;
2503 defm VPSLLDZ : avx512_shift_rmi<0x72, MRM6r, MRM6m, "vpslld", X86vshli,
2504 VR512, v16i32, i512mem, memopv16i32, VK16WM>, EVEX_V512,
2505 EVEX_CD8<32, CD8VF>;
2506 defm VPSLLDZ : avx512_shift_rrm<0xF2, "vpslld", X86vshl,
2507 VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
2508 EVEX_CD8<32, CD8VQ>;
2510 defm VPSLLQZ : avx512_shift_rmi<0x73, MRM6r, MRM6m, "vpsllq", X86vshli,
2511 VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
2512 EVEX_CD8<64, CD8VF>, VEX_W;
2513 defm VPSLLQZ : avx512_shift_rrm<0xF3, "vpsllq", X86vshl,
2514 VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
2515 EVEX_CD8<64, CD8VQ>, VEX_W;
2517 defm VPSRADZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsrad", X86vsrai,
2518 VR512, v16i32, i512mem, memopv16i32, VK16WM>,
2519 EVEX_V512, EVEX_CD8<32, CD8VF>;
2520 defm VPSRADZ : avx512_shift_rrm<0xE2, "vpsrad", X86vsra,
2521 VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
2522 EVEX_CD8<32, CD8VQ>;
// VPSRAQ is new in AVX-512 (no SSE/AVX predecessor); immediate form is
// EVEX.W1 72 /4, xmm-count form is EVEX.W1 E2.
2524 defm VPSRAQZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsraq", X86vsrai,
2525 VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
2526 EVEX_CD8<64, CD8VF>, VEX_W;
2527 defm VPSRAQZ : avx512_shift_rrm<0xE2, "vpsraq", X86vsra,
2528 VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
2529 EVEX_CD8<64, CD8VQ>, VEX_W;
2531 //===-------------------------------------------------------------------===//
2532 // Variable Bit Shifts
2533 //===-------------------------------------------------------------------===//
// Per-element variable shifts (VPSLLV/VPSRLV/VPSRAV): each destination
// element is shifted by the corresponding element of $src2.  These map
// directly onto the generic shl/srl/sra vector nodes.
2534 multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
2535 RegisterClass RC, ValueType vt,
2536 X86MemOperand x86memop, PatFrag mem_frag> {
2537 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
2538 (ins RC:$src1, RC:$src2),
2539 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2541 (vt (OpNode RC:$src1, (vt RC:$src2))))]>,
2543 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
2544 (ins RC:$src1, x86memop:$src2),
2545 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2547 (vt (OpNode RC:$src1, (mem_frag addr:$src2))))]>,
2551 defm VPSLLVDZ : avx512_var_shift<0x47, "vpsllvd", shl, VR512, v16i32,
2552 i512mem, memopv16i32>, EVEX_V512,
2553 EVEX_CD8<32, CD8VF>;
2554 defm VPSLLVQZ : avx512_var_shift<0x47, "vpsllvq", shl, VR512, v8i64,
2555 i512mem, memopv8i64>, EVEX_V512, VEX_W,
2556 EVEX_CD8<64, CD8VF>;
2557 defm VPSRLVDZ : avx512_var_shift<0x45, "vpsrlvd", srl, VR512, v16i32,
2558 i512mem, memopv16i32>, EVEX_V512,
2559 EVEX_CD8<32, CD8VF>;
2560 defm VPSRLVQZ : avx512_var_shift<0x45, "vpsrlvq", srl, VR512, v8i64,
2561 i512mem, memopv8i64>, EVEX_V512, VEX_W,
2562 EVEX_CD8<64, CD8VF>;
2563 defm VPSRAVDZ : avx512_var_shift<0x46, "vpsravd", sra, VR512, v16i32,
2564 i512mem, memopv16i32>, EVEX_V512,
2565 EVEX_CD8<32, CD8VF>;
2566 defm VPSRAVQZ : avx512_var_shift<0x46, "vpsravq", sra, VR512, v8i64,
2567 i512mem, memopv8i64>, EVEX_V512, VEX_W,
2568 EVEX_CD8<64, CD8VF>;
2570 //===----------------------------------------------------------------------===//
2571 // AVX-512 - MOVDDUP
2572 //===----------------------------------------------------------------------===//
// VMOVDDUP (512-bit): duplicate even-indexed double elements
// (X86Movddup node).  The extra Pat folds a scalar f64 load broadcast
// into the memory form.
2574 multiclass avx512_movddup<string OpcodeStr, RegisterClass RC, ValueType VT,
2575 X86MemOperand x86memop, PatFrag memop_frag> {
2576 def rr : AVX512PDI<0x12, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
2577 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
2578 [(set RC:$dst, (VT (X86Movddup RC:$src)))]>, EVEX;
2579 def rm : AVX512PDI<0x12, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
2580 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
2582 (VT (X86Movddup (memop_frag addr:$src))))]>, EVEX;
2585 defm VMOVDDUPZ : avx512_movddup<"vmovddup", VR512, v8f64, f512mem, memopv8f64>,
2586 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
2587 def : Pat<(X86Movddup (v8f64 (scalar_to_vector (loadf64 addr:$src)))),
2588 (VMOVDDUPZrm addr:$src)>;
2590 //===---------------------------------------------------------------------===//
2591 // Replicate Single FP - MOVSHDUP and MOVSLDUP
2592 //===---------------------------------------------------------------------===//
// VMOVSHDUP / VMOVSLDUP (512-bit): replicate odd / even single-precision
// elements.  The trailing Pats reuse the FP instructions for the
// v16i32-typed forms of the same shuffle nodes.
2593 multiclass avx512_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
2594 ValueType vt, RegisterClass RC, PatFrag mem_frag,
2595 X86MemOperand x86memop> {
2596 def rr : AVX512XSI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
2597 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
2598 [(set RC:$dst, (vt (OpNode RC:$src)))]>, EVEX;
2600 def rm : AVX512XSI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
2601 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
2602 [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>, EVEX;
2605 defm VMOVSHDUPZ : avx512_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
2606 v16f32, VR512, memopv16f32, f512mem>, EVEX_V512,
2607 EVEX_CD8<32, CD8VF>;
2608 defm VMOVSLDUPZ : avx512_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
2609 v16f32, VR512, memopv16f32, f512mem>, EVEX_V512,
2610 EVEX_CD8<32, CD8VF>;
2612 def : Pat<(v16i32 (X86Movshdup VR512:$src)), (VMOVSHDUPZrr VR512:$src)>;
2613 def : Pat<(v16i32 (X86Movshdup (memopv16i32 addr:$src))),
2614 (VMOVSHDUPZrm addr:$src)>;
2615 def : Pat<(v16i32 (X86Movsldup VR512:$src)), (VMOVSLDUPZrr VR512:$src)>;
2616 def : Pat<(v16i32 (X86Movsldup (memopv16i32 addr:$src))),
2617 (VMOVSLDUPZrm addr:$src)>;
2619 //===----------------------------------------------------------------------===//
2620 // Move Low to High and High to Low packed FP Instructions
2621 //===----------------------------------------------------------------------===//
// EVEX-encoded VMOVLHPS / VMOVHLPS on 128-bit registers; the HasAVX512
// Pats reuse them for the integer-typed forms of the same shuffle nodes.
2622 def VMOVLHPSZrr : AVX512PSI<0x16, MRMSrcReg, (outs VR128X:$dst),
2623 (ins VR128X:$src1, VR128X:$src2),
2624 "vmovlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2625 [(set VR128X:$dst, (v4f32 (X86Movlhps VR128X:$src1, VR128X:$src2)))],
2626 IIC_SSE_MOV_LH>, EVEX_4V;
2627 def VMOVHLPSZrr : AVX512PSI<0x12, MRMSrcReg, (outs VR128X:$dst),
2628 (ins VR128X:$src1, VR128X:$src2),
2629 "vmovhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2630 [(set VR128X:$dst, (v4f32 (X86Movhlps VR128X:$src1, VR128X:$src2)))],
2631 IIC_SSE_MOV_LH>, EVEX_4V;
2633 let Predicates = [HasAVX512] in {
2635 def : Pat<(v4i32 (X86Movlhps VR128X:$src1, VR128X:$src2)),
2636 (VMOVLHPSZrr VR128X:$src1, VR128X:$src2)>;
2637 def : Pat<(v2i64 (X86Movlhps VR128X:$src1, VR128X:$src2)),
2638 (VMOVLHPSZrr (v2i64 VR128X:$src1), VR128X:$src2)>;
2641 def : Pat<(v4i32 (X86Movhlps VR128X:$src1, VR128X:$src2)),
2642 (VMOVHLPSZrr VR128X:$src1, VR128X:$src2)>;
2645 //===----------------------------------------------------------------------===//
2646 // FMA - Fused Multiply Operations
// Packed FMA3 multiclass (213-style operand order at the instantiation
// sites).  $src1 is tied to $dst; forms: reg-reg (r), reg-mem (m), and
// embedded-broadcast (mb, EVEX_B with the {1toN} suffix in BrdcstStr).
2648 let Constraints = "$src1 = $dst" in {
2649 multiclass avx512_fma3p_rm<bits<8> opc, string OpcodeStr,
2650 RegisterClass RC, X86MemOperand x86memop,
2651 PatFrag mem_frag, X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
2652 string BrdcstStr, SDNode OpNode, ValueType OpVT> {
2653 def r: AVX512FMA3<opc, MRMSrcReg, (outs RC:$dst),
2654 (ins RC:$src1, RC:$src2, RC:$src3),
2655 !strconcat(OpcodeStr," \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
2656 [(set RC:$dst, (OpVT(OpNode RC:$src1, RC:$src2, RC:$src3)))]>;
2659 def m: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
2660 (ins RC:$src1, RC:$src2, x86memop:$src3),
2661 !strconcat(OpcodeStr, " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
2662 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2,
2663 (mem_frag addr:$src3))))]>;
2664 def mb: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
2665 (ins RC:$src1, RC:$src2, x86scalar_mop:$src3),
2666 !strconcat(OpcodeStr, " \t{${src3}", BrdcstStr,
2667 ", $src2, $dst|$dst, $src2, ${src3}", BrdcstStr, "}"),
2668 [(set RC:$dst, (OpNode RC:$src1, RC:$src2,
2669 (OpVT (X86VBroadcast (scalar_mfrag addr:$src3)))))]>, EVEX_B;
2671 } // Constraints = "$src1 = $dst"
// 213-form packed FMA instantiations, single then double precision.
// Opcode map: A8=fmadd, AA=fmsub, A6=fmaddsub, A7=fmsubadd, AC=fnmadd,
// AE=fnmsub.  PD forms add VEX_W and use the 64-bit CD8 scale.
2673 let ExeDomain = SSEPackedSingle in {
2674 defm VFMADD213PSZ : avx512_fma3p_rm<0xA8, "vfmadd213ps", VR512, f512mem,
2675 memopv16f32, f32mem, loadf32, "{1to16}",
2676 X86Fmadd, v16f32>, EVEX_V512,
2677 EVEX_CD8<32, CD8VF>;
2678 defm VFMSUB213PSZ : avx512_fma3p_rm<0xAA, "vfmsub213ps", VR512, f512mem,
2679 memopv16f32, f32mem, loadf32, "{1to16}",
2680 X86Fmsub, v16f32>, EVEX_V512,
2681 EVEX_CD8<32, CD8VF>;
2682 defm VFMADDSUB213PSZ : avx512_fma3p_rm<0xA6, "vfmaddsub213ps", VR512, f512mem,
2683 memopv16f32, f32mem, loadf32, "{1to16}",
2684 X86Fmaddsub, v16f32>,
2685 EVEX_V512, EVEX_CD8<32, CD8VF>;
2686 defm VFMSUBADD213PSZ : avx512_fma3p_rm<0xA7, "vfmsubadd213ps", VR512, f512mem,
2687 memopv16f32, f32mem, loadf32, "{1to16}",
2688 X86Fmsubadd, v16f32>,
2689 EVEX_V512, EVEX_CD8<32, CD8VF>;
2690 defm VFNMADD213PSZ : avx512_fma3p_rm<0xAC, "vfnmadd213ps", VR512, f512mem,
2691 memopv16f32, f32mem, loadf32, "{1to16}",
2692 X86Fnmadd, v16f32>, EVEX_V512,
2693 EVEX_CD8<32, CD8VF>;
2694 defm VFNMSUB213PSZ : avx512_fma3p_rm<0xAE, "vfnmsub213ps", VR512, f512mem,
2695 memopv16f32, f32mem, loadf32, "{1to16}",
2696 X86Fnmsub, v16f32>, EVEX_V512,
2697 EVEX_CD8<32, CD8VF>;
2699 let ExeDomain = SSEPackedDouble in {
2700 defm VFMADD213PDZ : avx512_fma3p_rm<0xA8, "vfmadd213pd", VR512, f512mem,
2701 memopv8f64, f64mem, loadf64, "{1to8}",
2702 X86Fmadd, v8f64>, EVEX_V512,
2703 VEX_W, EVEX_CD8<64, CD8VF>;
2704 defm VFMSUB213PDZ : avx512_fma3p_rm<0xAA, "vfmsub213pd", VR512, f512mem,
2705 memopv8f64, f64mem, loadf64, "{1to8}",
2706 X86Fmsub, v8f64>, EVEX_V512, VEX_W,
2707 EVEX_CD8<64, CD8VF>;
2708 defm VFMADDSUB213PDZ : avx512_fma3p_rm<0xA6, "vfmaddsub213pd", VR512, f512mem,
2709 memopv8f64, f64mem, loadf64, "{1to8}",
2710 X86Fmaddsub, v8f64>, EVEX_V512, VEX_W,
2711 EVEX_CD8<64, CD8VF>;
2712 defm VFMSUBADD213PDZ : avx512_fma3p_rm<0xA7, "vfmsubadd213pd", VR512, f512mem,
2713 memopv8f64, f64mem, loadf64, "{1to8}",
2714 X86Fmsubadd, v8f64>, EVEX_V512, VEX_W,
2715 EVEX_CD8<64, CD8VF>;
2716 defm VFNMADD213PDZ : avx512_fma3p_rm<0xAC, "vfnmadd213pd", VR512, f512mem,
2717 memopv8f64, f64mem, loadf64, "{1to8}",
2718 X86Fnmadd, v8f64>, EVEX_V512, VEX_W,
2719 EVEX_CD8<64, CD8VF>;
2720 defm VFNMSUB213PDZ : avx512_fma3p_rm<0xAE, "vfnmsub213pd", VR512, f512mem,
2721 memopv8f64, f64mem, loadf64, "{1to8}",
2722 X86Fnmsub, v8f64>, EVEX_V512, VEX_W,
2723 EVEX_CD8<64, CD8VF>;
// 132-form packed FMA multiclass: memory-only forms (m, mb).  The
// register-register 132 form is redundant with the 213 form after
// operand commutation, so only the memory/broadcast encodings are
// defined here; note the memory operand is $src2 in the 132 ordering.
2726 let Constraints = "$src1 = $dst" in {
2727 multiclass avx512_fma3p_m132<bits<8> opc, string OpcodeStr,
2728 RegisterClass RC, X86MemOperand x86memop,
2729 PatFrag mem_frag, X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
2730 string BrdcstStr, SDNode OpNode, ValueType OpVT> {
2732 def m: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
2733 (ins RC:$src1, RC:$src3, x86memop:$src2),
2734 !strconcat(OpcodeStr, " \t{$src2, $src3, $dst|$dst, $src3, $src2}"),
2735 [(set RC:$dst, (OpVT (OpNode RC:$src1, (mem_frag addr:$src2), RC:$src3)))]>;
2736 def mb: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
2737 (ins RC:$src1, RC:$src3, x86scalar_mop:$src2),
2738 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2739 ", $src3, $dst|$dst, $src3, ${src2}", BrdcstStr, "}"),
2740 [(set RC:$dst, (OpNode RC:$src1,
2741 (OpVT (X86VBroadcast (scalar_mfrag addr:$src2))), RC:$src3))]>, EVEX_B;
2743 } // Constraints = "$src1 = $dst"
// 132-form packed FMA instantiations (memory/broadcast forms only).
// Opcode map: 98=fmadd, 9A=fmsub, 96=fmaddsub, 97=fmsubadd, 9C=fnmadd,
// 9E=fnmsub; PD forms add VEX_W and the 64-bit CD8 scale.
2746 let ExeDomain = SSEPackedSingle in {
2747 defm VFMADD132PSZ : avx512_fma3p_m132<0x98, "vfmadd132ps", VR512, f512mem,
2748 memopv16f32, f32mem, loadf32, "{1to16}",
2749 X86Fmadd, v16f32>, EVEX_V512,
2750 EVEX_CD8<32, CD8VF>;
2751 defm VFMSUB132PSZ : avx512_fma3p_m132<0x9A, "vfmsub132ps", VR512, f512mem,
2752 memopv16f32, f32mem, loadf32, "{1to16}",
2753 X86Fmsub, v16f32>, EVEX_V512,
2754 EVEX_CD8<32, CD8VF>;
2755 defm VFMADDSUB132PSZ : avx512_fma3p_m132<0x96, "vfmaddsub132ps", VR512, f512mem,
2756 memopv16f32, f32mem, loadf32, "{1to16}",
2757 X86Fmaddsub, v16f32>,
2758 EVEX_V512, EVEX_CD8<32, CD8VF>;
2759 defm VFMSUBADD132PSZ : avx512_fma3p_m132<0x97, "vfmsubadd132ps", VR512, f512mem,
2760 memopv16f32, f32mem, loadf32, "{1to16}",
2761 X86Fmsubadd, v16f32>,
2762 EVEX_V512, EVEX_CD8<32, CD8VF>;
2763 defm VFNMADD132PSZ : avx512_fma3p_m132<0x9C, "vfnmadd132ps", VR512, f512mem,
2764 memopv16f32, f32mem, loadf32, "{1to16}",
2765 X86Fnmadd, v16f32>, EVEX_V512,
2766 EVEX_CD8<32, CD8VF>;
2767 defm VFNMSUB132PSZ : avx512_fma3p_m132<0x9E, "vfnmsub132ps", VR512, f512mem,
2768 memopv16f32, f32mem, loadf32, "{1to16}",
2769 X86Fnmsub, v16f32>, EVEX_V512,
2770 EVEX_CD8<32, CD8VF>;
2772 let ExeDomain = SSEPackedDouble in {
2773 defm VFMADD132PDZ : avx512_fma3p_m132<0x98, "vfmadd132pd", VR512, f512mem,
2774 memopv8f64, f64mem, loadf64, "{1to8}",
2775 X86Fmadd, v8f64>, EVEX_V512,
2776 VEX_W, EVEX_CD8<64, CD8VF>;
2777 defm VFMSUB132PDZ : avx512_fma3p_m132<0x9A, "vfmsub132pd", VR512, f512mem,
2778 memopv8f64, f64mem, loadf64, "{1to8}",
2779 X86Fmsub, v8f64>, EVEX_V512, VEX_W,
2780 EVEX_CD8<64, CD8VF>;
2781 defm VFMADDSUB132PDZ : avx512_fma3p_m132<0x96, "vfmaddsub132pd", VR512, f512mem,
2782 memopv8f64, f64mem, loadf64, "{1to8}",
2783 X86Fmaddsub, v8f64>, EVEX_V512, VEX_W,
2784 EVEX_CD8<64, CD8VF>;
2785 defm VFMSUBADD132PDZ : avx512_fma3p_m132<0x97, "vfmsubadd132pd", VR512, f512mem,
2786 memopv8f64, f64mem, loadf64, "{1to8}",
2787 X86Fmsubadd, v8f64>, EVEX_V512, VEX_W,
2788 EVEX_CD8<64, CD8VF>;
2789 defm VFNMADD132PDZ : avx512_fma3p_m132<0x9C, "vfnmadd132pd", VR512, f512mem,
2790 memopv8f64, f64mem, loadf64, "{1to8}",
2791 X86Fnmadd, v8f64>, EVEX_V512, VEX_W,
2792 EVEX_CD8<64, CD8VF>;
2793 defm VFNMSUB132PDZ : avx512_fma3p_m132<0x9E, "vfnmsub132pd", VR512, f512mem,
2794 memopv8f64, f64mem, loadf64, "{1to8}",
2795 X86Fnmsub, v8f64>, EVEX_V512, VEX_W,
2796 EVEX_CD8<64, CD8VF>;
// Scalar FMA3 multiclass (213 form).  $src1 is tied to $dst; the reg
// form is marked commutable (OpNode's first two operands commute).
// NOTE(review): the memory form hardcodes f128mem:$src3 instead of the
// x86memop parameter, leaving x86memop/memop apparently unused here --
// confirm against the instantiations below (which pass f32mem/ssmem
// etc.) whether this is intentional.
2800 let Constraints = "$src1 = $dst" in {
2801 multiclass avx512_fma3s_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2802 RegisterClass RC, ValueType OpVT,
2803 X86MemOperand x86memop, Operand memop,
2805 let isCommutable = 1 in
2806 def r : AVX512FMA3<opc, MRMSrcReg, (outs RC:$dst),
2807 (ins RC:$src1, RC:$src2, RC:$src3),
2808 !strconcat(OpcodeStr,
2809 " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
2811 (OpVT (OpNode RC:$src2, RC:$src1, RC:$src3)))]>;
2813 def m : AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
2814 (ins RC:$src1, RC:$src2, f128mem:$src3),
2815 !strconcat(OpcodeStr,
2816 " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
2818 (OpVT (OpNode RC:$src2, RC:$src1,
2819 (mem_frag addr:$src3))))]>;
2822 } // Constraints = "$src1 = $dst"
// Scalar 213-form FMA instantiations on FR32X/FR64X.  Opcode map:
// A9=fmadd, AB=fmsub, AD=fnmadd, AF=fnmsub; sd forms add VEX_W and the
// 64-bit CD8VT1 (tuple-1 scalar) scale.
2824 defm VFMADDSSZ : avx512_fma3s_rm<0xA9, "vfmadd213ss", X86Fmadd, FR32X,
2825 f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
2826 defm VFMADDSDZ : avx512_fma3s_rm<0xA9, "vfmadd213sd", X86Fmadd, FR64X,
2827 f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
2828 defm VFMSUBSSZ : avx512_fma3s_rm<0xAB, "vfmsub213ss", X86Fmsub, FR32X,
2829 f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
2830 defm VFMSUBSDZ : avx512_fma3s_rm<0xAB, "vfmsub213sd", X86Fmsub, FR64X,
2831 f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
2832 defm VFNMADDSSZ : avx512_fma3s_rm<0xAD, "vfnmadd213ss", X86Fnmadd, FR32X,
2833 f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
2834 defm VFNMADDSDZ : avx512_fma3s_rm<0xAD, "vfnmadd213sd", X86Fnmadd, FR64X,
2835 f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
2836 defm VFNMSUBSSZ : avx512_fma3s_rm<0xAF, "vfnmsub213ss", X86Fnmsub, FR32X,
2837 f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
2838 defm VFNMSUBSDZ : avx512_fma3s_rm<0xAF, "vfnmsub213sd", X86Fnmsub, FR64X,
2839 f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
2841 //===----------------------------------------------------------------------===//
2842 // AVX-512 Scalar convert from sign integer to float/double
2843 //===----------------------------------------------------------------------===//
// Scalar int->FP convert multiclass (cvtsi2ss/sd, cvtusi2ss/sd).  The
// defs themselves carry no patterns (hasSideEffects = 0, selection is
// done by the Pats below); $src1 is the pass-through register providing
// the untouched upper elements.
2845 multiclass avx512_vcvtsi<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
2846 X86MemOperand x86memop, string asm> {
2847 let hasSideEffects = 0 in {
2848 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
2849 !strconcat(asm," \t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
2852 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
2853 (ins DstRC:$src1, x86memop:$src),
2854 !strconcat(asm," \t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
2856 } // hasSideEffects = 0
// Instantiations of the signed (opcode 0x2A) and unsigned (AVX-512-only
// opcode 0x7B) int->FP converts, plus the sint_to_fp/uint_to_fp Pats
// that select them.  IMPLICIT_DEF supplies the pass-through $src1 since
// the scalar result fully defines the used lane.
2858 let Predicates = [HasAVX512] in {
2859 defm VCVTSI2SSZ : avx512_vcvtsi<0x2A, GR32, FR32X, i32mem, "cvtsi2ss{l}">,
2860 XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2861 defm VCVTSI642SSZ : avx512_vcvtsi<0x2A, GR64, FR32X, i64mem, "cvtsi2ss{q}">,
2862 XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
2863 defm VCVTSI2SDZ : avx512_vcvtsi<0x2A, GR32, FR64X, i32mem, "cvtsi2sd{l}">,
2864 XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2865 defm VCVTSI642SDZ : avx512_vcvtsi<0x2A, GR64, FR64X, i64mem, "cvtsi2sd{q}">,
2866 XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
2868 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
2869 (VCVTSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
2870 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
2871 (VCVTSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
2872 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
2873 (VCVTSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
2874 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
2875 (VCVTSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
2877 def : Pat<(f32 (sint_to_fp GR32:$src)),
2878 (VCVTSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
2879 def : Pat<(f32 (sint_to_fp GR64:$src)),
2880 (VCVTSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
2881 def : Pat<(f64 (sint_to_fp GR32:$src)),
2882 (VCVTSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
2883 def : Pat<(f64 (sint_to_fp GR64:$src)),
2884 (VCVTSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
// Unsigned converts: VCVTUSI2SS/SD exist only with AVX-512, so
// uint_to_fp can be selected directly here.
2886 defm VCVTUSI2SSZ : avx512_vcvtsi<0x7B, GR32, FR32X, i32mem, "cvtusi2ss{l}">,
2887 XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2888 defm VCVTUSI642SSZ : avx512_vcvtsi<0x7B, GR64, FR32X, i64mem, "cvtusi2ss{q}">,
2889 XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
2890 defm VCVTUSI2SDZ : avx512_vcvtsi<0x7B, GR32, FR64X, i32mem, "cvtusi2sd{l}">,
2891 XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2892 defm VCVTUSI642SDZ : avx512_vcvtsi<0x7B, GR64, FR64X, i64mem, "cvtusi2sd{q}">,
2893 XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
2895 def : Pat<(f32 (uint_to_fp (loadi32 addr:$src))),
2896 (VCVTUSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
2897 def : Pat<(f32 (uint_to_fp (loadi64 addr:$src))),
2898 (VCVTUSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
2899 def : Pat<(f64 (uint_to_fp (loadi32 addr:$src))),
2900 (VCVTUSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
2901 def : Pat<(f64 (uint_to_fp (loadi64 addr:$src))),
2902 (VCVTUSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
2904 def : Pat<(f32 (uint_to_fp GR32:$src)),
2905 (VCVTUSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
2906 def : Pat<(f32 (uint_to_fp GR64:$src)),
2907 (VCVTUSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
2908 def : Pat<(f64 (uint_to_fp GR32:$src)),
2909 (VCVTUSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
2910 def : Pat<(f64 (uint_to_fp GR64:$src)),
2911 (VCVTUSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
2914 //===----------------------------------------------------------------------===//
2915 // AVX-512 Scalar convert from float/double to integer
2916 //===----------------------------------------------------------------------===//
2917 multiclass avx512_cvt_s_int<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
2918 Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
2920 let hasSideEffects = 0 in {
2921 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
2922 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
2923 [(set DstRC:$dst, (Int SrcRC:$src))]>, EVEX, VEX_LIG,
2924 Requires<[HasAVX512]>;
2926 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
2927 !strconcat(asm," \t{$src, $dst|$dst, $src}"), []>, EVEX, VEX_LIG,
2928 Requires<[HasAVX512]>;
2929 } // hasSideEffects = 0
2931 let Predicates = [HasAVX512] in {
2932 // Convert float/double to signed/unsigned int 32/64
2933 defm VCVTSS2SIZ: avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse_cvtss2si,
2934 ssmem, sse_load_f32, "cvtss2si">,
2935 XS, EVEX_CD8<32, CD8VT1>;
2936 defm VCVTSS2SI64Z: avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse_cvtss2si64,
2937 ssmem, sse_load_f32, "cvtss2si">,
2938 XS, VEX_W, EVEX_CD8<32, CD8VT1>;
2939 defm VCVTSS2USIZ: avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtss2usi,
2940 ssmem, sse_load_f32, "cvtss2usi">,
2941 XS, EVEX_CD8<32, CD8VT1>;
2942 defm VCVTSS2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
2943 int_x86_avx512_cvtss2usi64, ssmem,
2944 sse_load_f32, "cvtss2usi">, XS, VEX_W,
2945 EVEX_CD8<32, CD8VT1>;
2946 defm VCVTSD2SIZ: avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse2_cvtsd2si,
2947 sdmem, sse_load_f64, "cvtsd2si">,
2948 XD, EVEX_CD8<64, CD8VT1>;
2949 defm VCVTSD2SI64Z: avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse2_cvtsd2si64,
2950 sdmem, sse_load_f64, "cvtsd2si">,
2951 XD, VEX_W, EVEX_CD8<64, CD8VT1>;
2952 defm VCVTSD2USIZ: avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtsd2usi,
2953 sdmem, sse_load_f64, "cvtsd2usi">,
2954 XD, EVEX_CD8<64, CD8VT1>;
2955 defm VCVTSD2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
2956 int_x86_avx512_cvtsd2usi64, sdmem,
2957 sse_load_f64, "cvtsd2usi">, XD, VEX_W,
2958 EVEX_CD8<64, CD8VT1>;
2960 let isCodeGenOnly = 1 in {
2961 defm Int_VCVTSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
2962 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
2963 SSE_CVT_Scalar, 0>, XS, EVEX_4V;
2964 defm Int_VCVTSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
2965 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
2966 SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
2967 defm Int_VCVTSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
2968 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
2969 SSE_CVT_Scalar, 0>, XD, EVEX_4V;
2970 defm Int_VCVTSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
2971 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
2972 SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
2974 defm Int_VCVTUSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
2975 int_x86_avx512_cvtusi2ss, i32mem, loadi32, "cvtusi2ss{l}",
2976 SSE_CVT_Scalar, 0>, XS, EVEX_4V;
2977 defm Int_VCVTUSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
2978 int_x86_avx512_cvtusi642ss, i64mem, loadi64, "cvtusi2ss{q}",
2979 SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
2980 defm Int_VCVTUSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
2981 int_x86_avx512_cvtusi2sd, i32mem, loadi32, "cvtusi2sd{l}",
2982 SSE_CVT_Scalar, 0>, XD, EVEX_4V;
2983 defm Int_VCVTUSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
2984 int_x86_avx512_cvtusi642sd, i64mem, loadi64, "cvtusi2sd{q}",
2985 SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
2986 } // isCodeGenOnly = 1
2988 // Convert float/double to signed/unsigned int 32/64 with truncation
2989 let isCodeGenOnly = 1 in {
2990 defm Int_VCVTTSS2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse_cvttss2si,
2991 ssmem, sse_load_f32, "cvttss2si">,
2992 XS, EVEX_CD8<32, CD8VT1>;
2993 defm Int_VCVTTSS2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
2994 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
2995 "cvttss2si">, XS, VEX_W,
2996 EVEX_CD8<32, CD8VT1>;
2997 defm Int_VCVTTSD2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse2_cvttsd2si,
2998 sdmem, sse_load_f64, "cvttsd2si">, XD,
2999 EVEX_CD8<64, CD8VT1>;
3000 defm Int_VCVTTSD2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
3001 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
3002 "cvttsd2si">, XD, VEX_W,
3003 EVEX_CD8<64, CD8VT1>;
3004 defm Int_VCVTTSS2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
3005 int_x86_avx512_cvttss2usi, ssmem, sse_load_f32,
3006 "cvttss2usi">, XS, EVEX_CD8<32, CD8VT1>;
3007 defm Int_VCVTTSS2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
3008 int_x86_avx512_cvttss2usi64, ssmem,
3009 sse_load_f32, "cvttss2usi">, XS, VEX_W,
3010 EVEX_CD8<32, CD8VT1>;
3011 defm Int_VCVTTSD2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
3012 int_x86_avx512_cvttsd2usi,
3013 sdmem, sse_load_f64, "cvttsd2usi">, XD,
3014 EVEX_CD8<64, CD8VT1>;
3015 defm Int_VCVTTSD2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
3016 int_x86_avx512_cvttsd2usi64, sdmem,
3017 sse_load_f64, "cvttsd2usi">, XD, VEX_W,
3018 EVEX_CD8<64, CD8VT1>;
3019 } // isCodeGenOnly = 1
3021 multiclass avx512_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
3022 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
3024 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
3025 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3026 [(set DstRC:$dst, (OpNode SrcRC:$src))]>, EVEX;
3027 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
3028 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3029 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>, EVEX;
3032 defm VCVTTSS2SIZ : avx512_cvt_s<0x2C, FR32X, GR32, fp_to_sint, f32mem,
3033 loadf32, "cvttss2si">, XS,
3034 EVEX_CD8<32, CD8VT1>;
3035 defm VCVTTSS2USIZ : avx512_cvt_s<0x78, FR32X, GR32, fp_to_uint, f32mem,
3036 loadf32, "cvttss2usi">, XS,
3037 EVEX_CD8<32, CD8VT1>;
3038 defm VCVTTSS2SI64Z : avx512_cvt_s<0x2C, FR32X, GR64, fp_to_sint, f32mem,
3039 loadf32, "cvttss2si">, XS, VEX_W,
3040 EVEX_CD8<32, CD8VT1>;
3041 defm VCVTTSS2USI64Z : avx512_cvt_s<0x78, FR32X, GR64, fp_to_uint, f32mem,
3042 loadf32, "cvttss2usi">, XS, VEX_W,
3043 EVEX_CD8<32, CD8VT1>;
3044 defm VCVTTSD2SIZ : avx512_cvt_s<0x2C, FR64X, GR32, fp_to_sint, f64mem,
3045 loadf64, "cvttsd2si">, XD,
3046 EVEX_CD8<64, CD8VT1>;
3047 defm VCVTTSD2USIZ : avx512_cvt_s<0x78, FR64X, GR32, fp_to_uint, f64mem,
3048 loadf64, "cvttsd2usi">, XD,
3049 EVEX_CD8<64, CD8VT1>;
3050 defm VCVTTSD2SI64Z : avx512_cvt_s<0x2C, FR64X, GR64, fp_to_sint, f64mem,
3051 loadf64, "cvttsd2si">, XD, VEX_W,
3052 EVEX_CD8<64, CD8VT1>;
3053 defm VCVTTSD2USI64Z : avx512_cvt_s<0x78, FR64X, GR64, fp_to_uint, f64mem,
3054 loadf64, "cvttsd2usi">, XD, VEX_W,
3055 EVEX_CD8<64, CD8VT1>;
3057 //===----------------------------------------------------------------------===//
3058 // AVX-512 Convert form float to double and back
3059 //===----------------------------------------------------------------------===//
3060 let hasSideEffects = 0 in {
3061 def VCVTSS2SDZrr : AVX512XSI<0x5A, MRMSrcReg, (outs FR64X:$dst),
3062 (ins FR32X:$src1, FR32X:$src2),
3063 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3064 []>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2F]>;
3066 def VCVTSS2SDZrm : AVX512XSI<0x5A, MRMSrcMem, (outs FR64X:$dst),
3067 (ins FR32X:$src1, f32mem:$src2),
3068 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3069 []>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>,
3070 EVEX_CD8<32, CD8VT1>;
3072 // Convert scalar double to scalar single
3073 def VCVTSD2SSZrr : AVX512XDI<0x5A, MRMSrcReg, (outs FR32X:$dst),
3074 (ins FR64X:$src1, FR64X:$src2),
3075 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3076 []>, EVEX_4V, VEX_LIG, VEX_W, Sched<[WriteCvtF2F]>;
3078 def VCVTSD2SSZrm : AVX512XDI<0x5A, MRMSrcMem, (outs FR32X:$dst),
3079 (ins FR64X:$src1, f64mem:$src2),
3080 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3081 []>, EVEX_4V, VEX_LIG, VEX_W,
3082 Sched<[WriteCvtF2FLd, ReadAfterLd]>, EVEX_CD8<64, CD8VT1>;
3085 def : Pat<(f64 (fextend FR32X:$src)), (VCVTSS2SDZrr FR32X:$src, FR32X:$src)>,
3086 Requires<[HasAVX512]>;
3087 def : Pat<(fextend (loadf32 addr:$src)),
3088 (VCVTSS2SDZrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX512]>;
3090 def : Pat<(extloadf32 addr:$src),
3091 (VCVTSS2SDZrm (f32 (IMPLICIT_DEF)), addr:$src)>,
3092 Requires<[HasAVX512, OptForSize]>;
3094 def : Pat<(extloadf32 addr:$src),
3095 (VCVTSS2SDZrr (f32 (IMPLICIT_DEF)), (VMOVSSZrm addr:$src))>,
3096 Requires<[HasAVX512, OptForSpeed]>;
3098 def : Pat<(f32 (fround FR64X:$src)), (VCVTSD2SSZrr FR64X:$src, FR64X:$src)>,
3099 Requires<[HasAVX512]>;
3101 multiclass avx512_vcvt_fp_with_rc<bits<8> opc, string asm, RegisterClass SrcRC,
3102 RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
3103 X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
3105 let hasSideEffects = 0 in {
3106 def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
3107 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3109 (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
3110 def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
3111 !strconcat(asm," \t{$rc, $src, $dst|$dst, $src, $rc}"),
3112 [], d>, EVEX, EVEX_B, EVEX_RC;
3114 def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
3115 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3117 (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
3118 } // hasSideEffects = 0
3121 multiclass avx512_vcvt_fp<bits<8> opc, string asm, RegisterClass SrcRC,
3122 RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
3123 X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
3125 let hasSideEffects = 0 in {
3126 def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
3127 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3129 (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
3131 def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
3132 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3134 (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
3135 } // hasSideEffects = 0
3138 defm VCVTPD2PSZ : avx512_vcvt_fp_with_rc<0x5A, "vcvtpd2ps", VR512, VR256X, fround,
3139 memopv8f64, f512mem, v8f32, v8f64,
3140 SSEPackedSingle>, EVEX_V512, VEX_W, PD,
3141 EVEX_CD8<64, CD8VF>;
3143 defm VCVTPS2PDZ : avx512_vcvt_fp<0x5A, "vcvtps2pd", VR256X, VR512, fextend,
3144 memopv4f64, f256mem, v8f64, v8f32,
3145 SSEPackedDouble>, EVEX_V512, PS,
3146 EVEX_CD8<32, CD8VH>;
3147 def : Pat<(v8f64 (extloadv8f32 addr:$src)),
3148 (VCVTPS2PDZrm addr:$src)>;
3150 def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
3151 (bc_v8f32(v8i32 immAllZerosV)), (i8 -1), (i32 FROUND_CURRENT))),
3152 (VCVTPD2PSZrr VR512:$src)>;
3154 def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
3155 (bc_v8f32(v8i32 immAllZerosV)), (i8 -1), imm:$rc)),
3156 (VCVTPD2PSZrrb VR512:$src, imm:$rc)>;
3158 //===----------------------------------------------------------------------===//
3159 // AVX-512 Vector convert from sign integer to float/double
3160 //===----------------------------------------------------------------------===//
3162 defm VCVTDQ2PSZ : avx512_vcvt_fp_with_rc<0x5B, "vcvtdq2ps", VR512, VR512, sint_to_fp,
3163 memopv8i64, i512mem, v16f32, v16i32,
3164 SSEPackedSingle>, EVEX_V512, PS,
3165 EVEX_CD8<32, CD8VF>;
3167 defm VCVTDQ2PDZ : avx512_vcvt_fp<0xE6, "vcvtdq2pd", VR256X, VR512, sint_to_fp,
3168 memopv4i64, i256mem, v8f64, v8i32,
3169 SSEPackedDouble>, EVEX_V512, XS,
3170 EVEX_CD8<32, CD8VH>;
3172 defm VCVTTPS2DQZ : avx512_vcvt_fp<0x5B, "vcvttps2dq", VR512, VR512, fp_to_sint,
3173 memopv16f32, f512mem, v16i32, v16f32,
3174 SSEPackedSingle>, EVEX_V512, XS,
3175 EVEX_CD8<32, CD8VF>;
3177 defm VCVTTPD2DQZ : avx512_vcvt_fp<0xE6, "vcvttpd2dq", VR512, VR256X, fp_to_sint,
3178 memopv8f64, f512mem, v8i32, v8f64,
3179 SSEPackedDouble>, EVEX_V512, PD, VEX_W,
3180 EVEX_CD8<64, CD8VF>;
3182 defm VCVTTPS2UDQZ : avx512_vcvt_fp<0x78, "vcvttps2udq", VR512, VR512, fp_to_uint,
3183 memopv16f32, f512mem, v16i32, v16f32,
3184 SSEPackedSingle>, EVEX_V512, PS,
3185 EVEX_CD8<32, CD8VF>;
3187 // cvttps2udq (src, 0, mask-all-ones, sae-current)
3188 def : Pat<(v16i32 (int_x86_avx512_mask_cvttps2udq_512 (v16f32 VR512:$src),
3189 (v16i32 immAllZerosV), (i16 -1), FROUND_CURRENT)),
3190 (VCVTTPS2UDQZrr VR512:$src)>;
3192 defm VCVTTPD2UDQZ : avx512_vcvt_fp<0x78, "vcvttpd2udq", VR512, VR256X, fp_to_uint,
3193 memopv8f64, f512mem, v8i32, v8f64,
3194 SSEPackedDouble>, EVEX_V512, PS, VEX_W,
3195 EVEX_CD8<64, CD8VF>;
3197 // cvttpd2udq (src, 0, mask-all-ones, sae-current)
3198 def : Pat<(v8i32 (int_x86_avx512_mask_cvttpd2udq_512 (v8f64 VR512:$src),
3199 (v8i32 immAllZerosV), (i8 -1), FROUND_CURRENT)),
3200 (VCVTTPD2UDQZrr VR512:$src)>;
3202 defm VCVTUDQ2PDZ : avx512_vcvt_fp<0x7A, "vcvtudq2pd", VR256X, VR512, uint_to_fp,
3203 memopv4i64, f256mem, v8f64, v8i32,
3204 SSEPackedDouble>, EVEX_V512, XS,
3205 EVEX_CD8<32, CD8VH>;
3207 defm VCVTUDQ2PSZ : avx512_vcvt_fp_with_rc<0x7A, "vcvtudq2ps", VR512, VR512, uint_to_fp,
3208 memopv16i32, f512mem, v16f32, v16i32,
3209 SSEPackedSingle>, EVEX_V512, XD,
3210 EVEX_CD8<32, CD8VF>;
3212 def : Pat<(v8i32 (fp_to_uint (v8f32 VR256X:$src1))),
3213 (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
3214 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
3216 def : Pat<(v4i32 (fp_to_uint (v4f32 VR128X:$src1))),
3217 (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
3218 (v16f32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;
3220 def : Pat<(v8f32 (uint_to_fp (v8i32 VR256X:$src1))),
3221 (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
3222 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
3224 def : Pat<(v4f32 (uint_to_fp (v4i32 VR128X:$src1))),
3225 (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
3226 (v16i32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;
3228 def : Pat<(v4f64 (uint_to_fp (v4i32 VR128X:$src1))),
3229 (EXTRACT_SUBREG (v8f64 (VCVTUDQ2PDZrr
3230 (v8i32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_ymm)>;
3232 def : Pat<(v16f32 (int_x86_avx512_mask_cvtdq2ps_512 (v16i32 VR512:$src),
3233 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), imm:$rc)),
3234 (VCVTDQ2PSZrrb VR512:$src, imm:$rc)>;
3235 def : Pat<(v8f64 (int_x86_avx512_mask_cvtdq2pd_512 (v8i32 VR256X:$src),
3236 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
3237 (VCVTDQ2PDZrr VR256X:$src)>;
3238 def : Pat<(v16f32 (int_x86_avx512_mask_cvtudq2ps_512 (v16i32 VR512:$src),
3239 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), imm:$rc)),
3240 (VCVTUDQ2PSZrrb VR512:$src, imm:$rc)>;
3241 def : Pat<(v8f64 (int_x86_avx512_mask_cvtudq2pd_512 (v8i32 VR256X:$src),
3242 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
3243 (VCVTUDQ2PDZrr VR256X:$src)>;
3245 multiclass avx512_vcvt_fp2int<bits<8> opc, string asm, RegisterClass SrcRC,
3246 RegisterClass DstRC, PatFrag mem_frag,
3247 X86MemOperand x86memop, Domain d> {
3248 let hasSideEffects = 0 in {
3249 def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
3250 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3252 def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
3253 !strconcat(asm," \t{$rc, $src, $dst|$dst, $src, $rc}"),
3254 [], d>, EVEX, EVEX_B, EVEX_RC;
3256 def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
3257 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3259 } // hasSideEffects = 0
3262 defm VCVTPS2DQZ : avx512_vcvt_fp2int<0x5B, "vcvtps2dq", VR512, VR512,
3263 memopv16f32, f512mem, SSEPackedSingle>, PD,
3264 EVEX_V512, EVEX_CD8<32, CD8VF>;
3265 defm VCVTPD2DQZ : avx512_vcvt_fp2int<0xE6, "vcvtpd2dq", VR512, VR256X,
3266 memopv8f64, f512mem, SSEPackedDouble>, XD, VEX_W,
3267 EVEX_V512, EVEX_CD8<64, CD8VF>;
3269 def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2dq_512 (v16f32 VR512:$src),
3270 (v16i32 immAllZerosV), (i16 -1), imm:$rc)),
3271 (VCVTPS2DQZrrb VR512:$src, imm:$rc)>;
3273 def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2dq_512 (v8f64 VR512:$src),
3274 (v8i32 immAllZerosV), (i8 -1), imm:$rc)),
3275 (VCVTPD2DQZrrb VR512:$src, imm:$rc)>;
3277 defm VCVTPS2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtps2udq", VR512, VR512,
3278 memopv16f32, f512mem, SSEPackedSingle>,
3279 PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
3280 defm VCVTPD2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtpd2udq", VR512, VR256X,
3281 memopv8f64, f512mem, SSEPackedDouble>, VEX_W,
3282 PS, EVEX_V512, EVEX_CD8<64, CD8VF>;
3284 def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2udq_512 (v16f32 VR512:$src),
3285 (v16i32 immAllZerosV), (i16 -1), imm:$rc)),
3286 (VCVTPS2UDQZrrb VR512:$src, imm:$rc)>;
3288 def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2udq_512 (v8f64 VR512:$src),
3289 (v8i32 immAllZerosV), (i8 -1), imm:$rc)),
3290 (VCVTPD2UDQZrrb VR512:$src, imm:$rc)>;
3292 let Predicates = [HasAVX512] in {
3293 def : Pat<(v8f32 (fround (loadv8f64 addr:$src))),
3294 (VCVTPD2PSZrm addr:$src)>;
3295 def : Pat<(v8f64 (extloadv8f32 addr:$src)),
3296 (VCVTPS2PDZrm addr:$src)>;
3299 //===----------------------------------------------------------------------===//
3300 // Half precision conversion instructions
3301 //===----------------------------------------------------------------------===//
3302 multiclass avx512_cvtph2ps<RegisterClass destRC, RegisterClass srcRC,
3303 X86MemOperand x86memop> {
3304 def rr : AVX5128I<0x13, MRMSrcReg, (outs destRC:$dst), (ins srcRC:$src),
3305 "vcvtph2ps\t{$src, $dst|$dst, $src}",
3307 let hasSideEffects = 0, mayLoad = 1 in
3308 def rm : AVX5128I<0x13, MRMSrcMem, (outs destRC:$dst), (ins x86memop:$src),
3309 "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, EVEX;
3312 multiclass avx512_cvtps2ph<RegisterClass destRC, RegisterClass srcRC,
3313 X86MemOperand x86memop> {
3314 def rr : AVX512AIi8<0x1D, MRMDestReg, (outs destRC:$dst),
3315 (ins srcRC:$src1, i32i8imm:$src2),
3316 "vcvtps2ph \t{$src2, $src1, $dst|$dst, $src1, $src2}",
3318 let hasSideEffects = 0, mayStore = 1 in
3319 def mr : AVX512AIi8<0x1D, MRMDestMem, (outs),
3320 (ins x86memop:$dst, srcRC:$src1, i32i8imm:$src2),
3321 "vcvtps2ph \t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, EVEX;
3324 defm VCVTPH2PSZ : avx512_cvtph2ps<VR512, VR256X, f256mem>, EVEX_V512,
3325 EVEX_CD8<32, CD8VH>;
3326 defm VCVTPS2PHZ : avx512_cvtps2ph<VR256X, VR512, f256mem>, EVEX_V512,
3327 EVEX_CD8<32, CD8VH>;
3329 def : Pat<(v16i16 (int_x86_avx512_mask_vcvtps2ph_512 (v16f32 VR512:$src),
3330 imm:$rc, (bc_v16i16(v8i32 immAllZerosV)), (i16 -1))),
3331 (VCVTPS2PHZrr VR512:$src, imm:$rc)>;
3333 def : Pat<(v16f32 (int_x86_avx512_mask_vcvtph2ps_512 (v16i16 VR256X:$src),
3334 (bc_v16f32(v16i32 immAllZerosV)), (i16 -1), (i32 FROUND_CURRENT))),
3335 (VCVTPH2PSZrr VR256X:$src)>;
3337 let Defs = [EFLAGS], Predicates = [HasAVX512] in {
3338 defm VUCOMISSZ : sse12_ord_cmp<0x2E, FR32X, X86cmp, f32, f32mem, loadf32,
3339 "ucomiss">, PS, EVEX, VEX_LIG,
3340 EVEX_CD8<32, CD8VT1>;
3341 defm VUCOMISDZ : sse12_ord_cmp<0x2E, FR64X, X86cmp, f64, f64mem, loadf64,
3342 "ucomisd">, PD, EVEX,
3343 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
3344 let Pattern = []<dag> in {
3345 defm VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, undef, v4f32, f128mem, load,
3346 "comiss">, PS, EVEX, VEX_LIG,
3347 EVEX_CD8<32, CD8VT1>;
3348 defm VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, undef, v2f64, f128mem, load,
3349 "comisd">, PD, EVEX,
3350 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
3352 let isCodeGenOnly = 1 in {
3353 defm Int_VUCOMISSZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v4f32, f128mem,
3354 load, "ucomiss">, PS, EVEX, VEX_LIG,
3355 EVEX_CD8<32, CD8VT1>;
3356 defm Int_VUCOMISDZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v2f64, f128mem,
3357 load, "ucomisd">, PD, EVEX,
3358 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
3360 defm Int_VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v4f32, f128mem,
3361 load, "comiss">, PS, EVEX, VEX_LIG,
3362 EVEX_CD8<32, CD8VT1>;
3363 defm Int_VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v2f64, f128mem,
3364 load, "comisd">, PD, EVEX,
3365 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
3369 /// avx512_fp14_s rcp14ss, rcp14sd, rsqrt14ss, rsqrt14sd
3370 multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
3371 X86MemOperand x86memop> {
3372 let hasSideEffects = 0 in {
3373 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
3374 (ins RC:$src1, RC:$src2),
3375 !strconcat(OpcodeStr,
3376 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
3377 let mayLoad = 1 in {
3378 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
3379 (ins RC:$src1, x86memop:$src2),
3380 !strconcat(OpcodeStr,
3381 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
3386 defm VRCP14SS : avx512_fp14_s<0x4D, "vrcp14ss", FR32X, f32mem>,
3387 EVEX_CD8<32, CD8VT1>;
3388 defm VRCP14SD : avx512_fp14_s<0x4D, "vrcp14sd", FR64X, f64mem>,
3389 VEX_W, EVEX_CD8<64, CD8VT1>;
3390 defm VRSQRT14SS : avx512_fp14_s<0x4F, "vrsqrt14ss", FR32X, f32mem>,
3391 EVEX_CD8<32, CD8VT1>;
3392 defm VRSQRT14SD : avx512_fp14_s<0x4F, "vrsqrt14sd", FR64X, f64mem>,
3393 VEX_W, EVEX_CD8<64, CD8VT1>;
3395 def : Pat <(v4f32 (int_x86_avx512_rcp14_ss (v4f32 VR128X:$src1),
3396 (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1))),
3397 (COPY_TO_REGCLASS (VRCP14SSrr (COPY_TO_REGCLASS VR128X:$src1, FR32X),
3398 (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
3400 def : Pat <(v2f64 (int_x86_avx512_rcp14_sd (v2f64 VR128X:$src1),
3401 (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1))),
3402 (COPY_TO_REGCLASS (VRCP14SDrr (COPY_TO_REGCLASS VR128X:$src1, FR64X),
3403 (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
3405 def : Pat <(v4f32 (int_x86_avx512_rsqrt14_ss (v4f32 VR128X:$src1),
3406 (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1))),
3407 (COPY_TO_REGCLASS (VRSQRT14SSrr (COPY_TO_REGCLASS VR128X:$src1, FR32X),
3408 (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
3410 def : Pat <(v2f64 (int_x86_avx512_rsqrt14_sd (v2f64 VR128X:$src1),
3411 (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1))),
3412 (COPY_TO_REGCLASS (VRSQRT14SDrr (COPY_TO_REGCLASS VR128X:$src1, FR64X),
3413 (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
3415 /// avx512_fp14_p rcp14ps, rcp14pd, rsqrt14ps, rsqrt14pd
3416 multiclass avx512_fp14_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
3417 RegisterClass RC, X86MemOperand x86memop,
3418 PatFrag mem_frag, ValueType OpVt> {
3419 def r : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
3420 !strconcat(OpcodeStr,
3421 " \t{$src, $dst|$dst, $src}"),
3422 [(set RC:$dst, (OpVt (OpNode RC:$src)))]>,
3424 def m : AVX5128I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
3425 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
3426 [(set RC:$dst, (OpVt (OpNode (mem_frag addr:$src))))]>,
3429 defm VRSQRT14PSZ : avx512_fp14_p<0x4E, "vrsqrt14ps", X86frsqrt, VR512, f512mem,
3430 memopv16f32, v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
3431 defm VRSQRT14PDZ : avx512_fp14_p<0x4E, "vrsqrt14pd", X86frsqrt, VR512, f512mem,
3432 memopv8f64, v8f64>, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
3433 defm VRCP14PSZ : avx512_fp14_p<0x4C, "vrcp14ps", X86frcp, VR512, f512mem,
3434 memopv16f32, v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
3435 defm VRCP14PDZ : avx512_fp14_p<0x4C, "vrcp14pd", X86frcp, VR512, f512mem,
3436 memopv8f64, v8f64>, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
3438 def : Pat <(v16f32 (int_x86_avx512_rsqrt14_ps_512 (v16f32 VR512:$src),
3439 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
3440 (VRSQRT14PSZr VR512:$src)>;
3441 def : Pat <(v8f64 (int_x86_avx512_rsqrt14_pd_512 (v8f64 VR512:$src),
3442 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
3443 (VRSQRT14PDZr VR512:$src)>;
3445 def : Pat <(v16f32 (int_x86_avx512_rcp14_ps_512 (v16f32 VR512:$src),
3446 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
3447 (VRCP14PSZr VR512:$src)>;
3448 def : Pat <(v8f64 (int_x86_avx512_rcp14_pd_512 (v8f64 VR512:$src),
3449 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
3450 (VRCP14PDZr VR512:$src)>;
3452 /// avx512_fp28_s rcp28ss, rcp28sd, rsqrt28ss, rsqrt28sd
3453 multiclass avx512_fp28_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
3454 X86MemOperand x86memop> {
3455 let hasSideEffects = 0, Predicates = [HasERI] in {
3456 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
3457 (ins RC:$src1, RC:$src2),
3458 !strconcat(OpcodeStr,
3459 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
3460 def rrb : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
3461 (ins RC:$src1, RC:$src2),
3462 !strconcat(OpcodeStr,
3463 " \t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
3464 []>, EVEX_4V, EVEX_B;
3465 let mayLoad = 1 in {
3466 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
3467 (ins RC:$src1, x86memop:$src2),
3468 !strconcat(OpcodeStr,
3469 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
3474 defm VRCP28SS : avx512_fp28_s<0xCB, "vrcp28ss", FR32X, f32mem>,
3475 EVEX_CD8<32, CD8VT1>;
3476 defm VRCP28SD : avx512_fp28_s<0xCB, "vrcp28sd", FR64X, f64mem>,
3477 VEX_W, EVEX_CD8<64, CD8VT1>;
3478 defm VRSQRT28SS : avx512_fp28_s<0xCD, "vrsqrt28ss", FR32X, f32mem>,
3479 EVEX_CD8<32, CD8VT1>;
3480 defm VRSQRT28SD : avx512_fp28_s<0xCD, "vrsqrt28sd", FR64X, f64mem>,
3481 VEX_W, EVEX_CD8<64, CD8VT1>;
3483 def : Pat <(v4f32 (int_x86_avx512_rcp28_ss (v4f32 VR128X:$src1),
3484 (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1),
3486 (COPY_TO_REGCLASS (VRCP28SSrrb (COPY_TO_REGCLASS VR128X:$src1, FR32X),
3487 (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
3489 def : Pat <(v2f64 (int_x86_avx512_rcp28_sd (v2f64 VR128X:$src1),
3490 (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1),
3492 (COPY_TO_REGCLASS (VRCP28SDrrb (COPY_TO_REGCLASS VR128X:$src1, FR64X),
3493 (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
3495 def : Pat <(v4f32 (int_x86_avx512_rsqrt28_ss (v4f32 VR128X:$src1),
3496 (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1),
3498 (COPY_TO_REGCLASS (VRSQRT28SSrrb (COPY_TO_REGCLASS VR128X:$src1, FR32X),
3499 (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
3501 def : Pat <(v2f64 (int_x86_avx512_rsqrt28_sd (v2f64 VR128X:$src1),
3502 (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1),
3504 (COPY_TO_REGCLASS (VRSQRT28SDrrb (COPY_TO_REGCLASS VR128X:$src1, FR64X),
3505 (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
3507 /// avx512_fp28_p rcp28ps, rcp28pd, rsqrt28ps, rsqrt28pd
3508 multiclass avx512_fp28_p<bits<8> opc, string OpcodeStr,
3509 RegisterClass RC, X86MemOperand x86memop> {
3510 let hasSideEffects = 0, Predicates = [HasERI] in {
3511 def r : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
3512 !strconcat(OpcodeStr,
3513 " \t{$src, $dst|$dst, $src}"),
3515 def rb : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
3516 !strconcat(OpcodeStr,
3517 " \t{{sae}, $src, $dst|$dst, $src, {sae}}"),
3519 def m : AVX5128I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
3520 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
3524 defm VRSQRT28PSZ : avx512_fp28_p<0xCC, "vrsqrt28ps", VR512, f512mem>,
3525 EVEX_V512, EVEX_CD8<32, CD8VF>;
3526 defm VRSQRT28PDZ : avx512_fp28_p<0xCC, "vrsqrt28pd", VR512, f512mem>,
3527 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
3528 defm VRCP28PSZ : avx512_fp28_p<0xCA, "vrcp28ps", VR512, f512mem>,
3529 EVEX_V512, EVEX_CD8<32, CD8VF>;
3530 defm VRCP28PDZ : avx512_fp28_p<0xCA, "vrcp28pd", VR512, f512mem>,
3531 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
3533 def : Pat <(v16f32 (int_x86_avx512_rsqrt28_ps (v16f32 VR512:$src),
3534 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), FROUND_NO_EXC)),
3535 (VRSQRT28PSZrb VR512:$src)>;
3536 def : Pat <(v8f64 (int_x86_avx512_rsqrt28_pd (v8f64 VR512:$src),
3537 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1), FROUND_NO_EXC)),
3538 (VRSQRT28PDZrb VR512:$src)>;
3540 def : Pat <(v16f32 (int_x86_avx512_rcp28_ps (v16f32 VR512:$src),
3541 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), FROUND_NO_EXC)),
3542 (VRCP28PSZrb VR512:$src)>;
3543 def : Pat <(v8f64 (int_x86_avx512_rcp28_pd (v8f64 VR512:$src),
3544 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1), FROUND_NO_EXC)),
3545 (VRCP28PDZrb VR512:$src)>;
3547 multiclass avx512_sqrt_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
3548 Intrinsic V16F32Int, Intrinsic V8F64Int,
3549 OpndItins itins_s, OpndItins itins_d> {
3550 def PSZrr :AVX512PSI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
3551 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3552 [(set VR512:$dst, (v16f32 (OpNode VR512:$src)))], itins_s.rr>,
3556 def PSZrm : AVX512PSI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
3557 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3559 (OpNode (v16f32 (bitconvert (memopv16f32 addr:$src)))))],
3560 itins_s.rm>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
3562 def PDZrr : AVX512PDI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
3563 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3564 [(set VR512:$dst, (v8f64 (OpNode VR512:$src)))], itins_d.rr>,
3568 def PDZrm : AVX512PDI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
3569 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3570 [(set VR512:$dst, (OpNode
3571 (v8f64 (bitconvert (memopv16f32 addr:$src)))))],
3572 itins_d.rm>, EVEX, EVEX_V512, EVEX_CD8<64, CD8VF>;
3574 let isCodeGenOnly = 1 in {
3575 def PSZr_Int : AVX512PSI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
3576 !strconcat(OpcodeStr,
3577 "ps\t{$src, $dst|$dst, $src}"),
3578 [(set VR512:$dst, (V16F32Int VR512:$src))]>,
3580 def PSZm_Int : AVX512PSI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
3581 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3583 (V16F32Int (memopv16f32 addr:$src)))]>, EVEX,
3584 EVEX_V512, EVEX_CD8<32, CD8VF>;
3585 def PDZr_Int : AVX512PDI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
3586 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3587 [(set VR512:$dst, (V8F64Int VR512:$src))]>,
3588 EVEX, EVEX_V512, VEX_W;
3589 def PDZm_Int : AVX512PDI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
3590 !strconcat(OpcodeStr,
3591 "pd\t{$src, $dst|$dst, $src}"),
3592 [(set VR512:$dst, (V8F64Int (memopv8f64 addr:$src)))]>,
3593 EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
3594 } // isCodeGenOnly = 1
// Scalar square-root (VSQRTSS / VSQRTSD): register, memory, and intrinsic
// (_Int, isCodeGenOnly) forms. XS = single-precision scalar, XD = double;
// all forms are EVEX-encoded three-operand (EVEX_4V).
// NOTE(review): the closing braces of this multiclass fall outside this excerpt.
3597 multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
3598 Intrinsic F32Int, Intrinsic F64Int,
3599 OpndItins itins_s, OpndItins itins_d> {
// Scalar single-precision, reg-reg; no pattern (selected via Pat<> below).
3600 def SSZr : SI<opc, MRMSrcReg, (outs FR32X:$dst),
3601 (ins FR32X:$src1, FR32X:$src2),
3602 !strconcat(OpcodeStr,
3603 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3604 [], itins_s.rr>, XS, EVEX_4V;
// Intrinsic form operating on full XMM registers.
3605 let isCodeGenOnly = 1 in
3606 def SSZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
3607 (ins VR128X:$src1, VR128X:$src2),
3608 !strconcat(OpcodeStr,
3609 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3611 (F32Int VR128X:$src1, VR128X:$src2))],
3612 itins_s.rr>, XS, EVEX_4V;
// Memory forms (scalar load of the second operand).
3613 let mayLoad = 1 in {
3614 def SSZm : SI<opc, MRMSrcMem, (outs FR32X:$dst),
3615 (ins FR32X:$src1, f32mem:$src2),
3616 !strconcat(OpcodeStr,
3617 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3618 [], itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
3619 let isCodeGenOnly = 1 in
3620 def SSZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
3621 (ins VR128X:$src1, ssmem:$src2),
3622 !strconcat(OpcodeStr,
3623 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3625 (F32Int VR128X:$src1, sse_load_f32:$src2))],
3626 itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
// Scalar double-precision variants (VEX_W selects the 64-bit form).
3628 def SDZr : SI<opc, MRMSrcReg, (outs FR64X:$dst),
3629 (ins FR64X:$src1, FR64X:$src2),
3630 !strconcat(OpcodeStr,
3631 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
3633 let isCodeGenOnly = 1 in
3634 def SDZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
3635 (ins VR128X:$src1, VR128X:$src2),
3636 !strconcat(OpcodeStr,
3637 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3639 (F64Int VR128X:$src1, VR128X:$src2))],
3640 itins_s.rr>, XD, EVEX_4V, VEX_W;
3641 let mayLoad = 1 in {
3642 def SDZm : SI<opc, MRMSrcMem, (outs FR64X:$dst),
3643 (ins FR64X:$src1, f64mem:$src2),
3644 !strconcat(OpcodeStr,
3645 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
3646 XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
3647 let isCodeGenOnly = 1 in
3648 def SDZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
3649 (ins VR128X:$src1, sdmem:$src2),
3650 !strconcat(OpcodeStr,
3651 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3653 (F64Int VR128X:$src1, sse_load_f64:$src2))]>,
3654 XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
// Instantiate scalar (VSQRTSS/SD via the intrinsics) and packed
// (VSQRTPS/PD 512-bit via fsqrt and the mask intrinsics) square root.
3659 defm VSQRT : avx512_sqrt_scalar<0x51, "sqrt",
3660 int_x86_avx512_sqrt_ss, int_x86_avx512_sqrt_sd,
3661 SSE_SQRTSS, SSE_SQRTSD>,
3662 avx512_sqrt_packed<0x51, "vsqrt", fsqrt,
3663 int_x86_avx512_sqrt_ps_512, int_x86_avx512_sqrt_pd_512,
3664 SSE_SQRTPS, SSE_SQRTPD>;
// Select generic fsqrt / X86frsqrt / X86frcp nodes to the AVX-512 scalar
// instructions. The folded-load forms are restricted to OptForSize because
// the register forms tie $src1 (pass IMPLICIT_DEF) and folding the load
// would otherwise create a false dependency on the destination.
3666 let Predicates = [HasAVX512] in {
3667 def : Pat<(f32 (fsqrt FR32X:$src)),
3668 (VSQRTSSZr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
3669 def : Pat<(f32 (fsqrt (load addr:$src))),
3670 (VSQRTSSZm (f32 (IMPLICIT_DEF)), addr:$src)>,
3671 Requires<[OptForSize]>;
3672 def : Pat<(f64 (fsqrt FR64X:$src)),
3673 (VSQRTSDZr (f64 (IMPLICIT_DEF)), FR64X:$src)>;
3674 def : Pat<(f64 (fsqrt (load addr:$src))),
3675 (VSQRTSDZm (f64 (IMPLICIT_DEF)), addr:$src)>,
3676 Requires<[OptForSize]>;
// Reciprocal square-root estimate (VRSQRT14SS).
3678 def : Pat<(f32 (X86frsqrt FR32X:$src)),
3679 (VRSQRT14SSrr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
3680 def : Pat<(f32 (X86frsqrt (load addr:$src))),
3681 (VRSQRT14SSrm (f32 (IMPLICIT_DEF)), addr:$src)>,
3682 Requires<[OptForSize]>;
// Reciprocal estimate (VRCP14SS).
3684 def : Pat<(f32 (X86frcp FR32X:$src)),
3685 (VRCP14SSrr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
3686 def : Pat<(f32 (X86frcp (load addr:$src))),
3687 (VRCP14SSrm (f32 (IMPLICIT_DEF)), addr:$src)>,
3688 Requires<[OptForSize]>;
// Legacy SSE sqrt intrinsics: extract the scalar, run the AVX-512 scalar
// instruction, and copy back to the vector register class.
3690 def : Pat<(int_x86_sse_sqrt_ss VR128X:$src),
3691 (COPY_TO_REGCLASS (VSQRTSSZr (f32 (IMPLICIT_DEF)),
3692 (COPY_TO_REGCLASS VR128X:$src, FR32)),
3694 def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
3695 (VSQRTSSZm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
3697 def : Pat<(int_x86_sse2_sqrt_sd VR128X:$src),
3698 (COPY_TO_REGCLASS (VSQRTSDZr (f64 (IMPLICIT_DEF)),
3699 (COPY_TO_REGCLASS VR128X:$src, FR64)),
3701 def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
3702 (VSQRTSDZm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
// Packed FP unary op with immediate: reg and mem forms for both "ps"
// (single, V4F32Int intrinsic) and "pd" (double, V2F64Int intrinsic).
3706 multiclass avx512_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
3707 X86MemOperand x86memop, RegisterClass RC,
3708 PatFrag mem_frag32, PatFrag mem_frag64,
3709 Intrinsic V4F32Int, Intrinsic V2F64Int,
3711 let ExeDomain = SSEPackedSingle in {
3712 // Intrinsic operation, reg.
3713 // Vector intrinsic operation, reg
3714 def PSr : AVX512AIi8<opcps, MRMSrcReg,
3715 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
3716 !strconcat(OpcodeStr,
3717 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3718 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>;
3720 // Vector intrinsic operation, mem
3721 def PSm : AVX512AIi8<opcps, MRMSrcMem,
3722 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
3723 !strconcat(OpcodeStr,
3724 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3726 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
3727 EVEX_CD8<32, VForm>;
3728 } // ExeDomain = SSEPackedSingle
3730 let ExeDomain = SSEPackedDouble in {
3731 // Vector intrinsic operation, reg
3732 def PDr : AVX512AIi8<opcpd, MRMSrcReg,
3733 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
3734 !strconcat(OpcodeStr,
3735 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3736 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>;
3738 // Vector intrinsic operation, mem
3739 def PDm : AVX512AIi8<opcpd, MRMSrcMem,
3740 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
3741 !strconcat(OpcodeStr,
3742 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3744 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
3745 EVEX_CD8<64, VForm>;
3746 } // ExeDomain = SSEPackedDouble
// Scalar FP two-source op with immediate ($src3): plain register-class
// forms (SSr/SDr, no pattern, hasSideEffects = 0), isCodeGenOnly intrinsic
// reg forms, and intrinsic mem forms folding a scalar load.
3749 multiclass avx512_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
3753 let ExeDomain = GenericDomain in {
3755 let hasSideEffects = 0 in
3756 def SSr : AVX512AIi8<opcss, MRMSrcReg,
3757 (outs FR32X:$dst), (ins FR32X:$src1, FR32X:$src2, i32i8imm:$src3),
3758 !strconcat(OpcodeStr,
3759 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3762 // Intrinsic operation, reg.
3763 let isCodeGenOnly = 1 in
3764 def SSr_Int : AVX512AIi8<opcss, MRMSrcReg,
3765 (outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
3766 !strconcat(OpcodeStr,
3767 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3768 [(set VR128X:$dst, (F32Int VR128X:$src1, VR128X:$src2, imm:$src3))]>;
3770 // Intrinsic operation, mem.
3771 def SSm : AVX512AIi8<opcss, MRMSrcMem, (outs VR128X:$dst),
3772 (ins VR128X:$src1, ssmem:$src2, i32i8imm:$src3),
3773 !strconcat(OpcodeStr,
3774 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3775 [(set VR128X:$dst, (F32Int VR128X:$src1,
3776 sse_load_f32:$src2, imm:$src3))]>,
3777 EVEX_CD8<32, CD8VT1>;
// Double-precision ("sd") counterparts of the three forms above.
3780 let hasSideEffects = 0 in
3781 def SDr : AVX512AIi8<opcsd, MRMSrcReg,
3782 (outs FR64X:$dst), (ins FR64X:$src1, FR64X:$src2, i32i8imm:$src3),
3783 !strconcat(OpcodeStr,
3784 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3787 // Intrinsic operation, reg.
3788 let isCodeGenOnly = 1 in
3789 def SDr_Int : AVX512AIi8<opcsd, MRMSrcReg,
3790 (outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
3791 !strconcat(OpcodeStr,
3792 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3793 [(set VR128X:$dst, (F64Int VR128X:$src1, VR128X:$src2, imm:$src3))]>,
3796 // Intrinsic operation, mem.
3797 def SDm : AVX512AIi8<opcsd, MRMSrcMem,
3798 (outs VR128X:$dst), (ins VR128X:$src1, sdmem:$src2, i32i8imm:$src3),
3799 !strconcat(OpcodeStr,
3800 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3802 (F64Int VR128X:$src1, sse_load_f64:$src2, imm:$src3))]>,
3803 VEX_W, EVEX_CD8<64, CD8VT1>;
3804 } // ExeDomain = GenericDomain
// Packed round-to-scale: one reg and one mem form, each taking a rounding
// immediate in $src2. ExeDomain is parameterized (single vs. double).
3807 multiclass avx512_rndscale<bits<8> opc, string OpcodeStr,
3808 X86MemOperand x86memop, RegisterClass RC,
3809 PatFrag mem_frag, Domain d> {
3810 let ExeDomain = d in {
3811 // Intrinsic operation, reg.
3812 // Vector intrinsic operation, reg
3813 def r : AVX512AIi8<opc, MRMSrcReg,
3814 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
3815 !strconcat(OpcodeStr,
3816 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3819 // Vector intrinsic operation, mem
3820 def m : AVX512AIi8<opc, MRMSrcMem,
3821 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
3822 !strconcat(OpcodeStr,
3823 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
// 512-bit VRNDSCALEPS/VRNDSCALEPD, plus patterns mapping the unmasked
// forms of the mask intrinsics (pass-through == source, all-ones mask)
// onto the plain register instruction.
3829 defm VRNDSCALEPSZ : avx512_rndscale<0x08, "vrndscaleps", f512mem, VR512,
3830 memopv16f32, SSEPackedSingle>, EVEX_V512,
3831 EVEX_CD8<32, CD8VF>;
3833 def : Pat<(v16f32 (int_x86_avx512_mask_rndscale_ps_512 (v16f32 VR512:$src1),
3834 imm:$src2, (v16f32 VR512:$src1), (i16 -1),
3836 (VRNDSCALEPSZr VR512:$src1, imm:$src2)>;
3839 defm VRNDSCALEPDZ : avx512_rndscale<0x09, "vrndscalepd", f512mem, VR512,
3840 memopv8f64, SSEPackedDouble>, EVEX_V512,
3841 VEX_W, EVEX_CD8<64, CD8VF>;
3843 def : Pat<(v8f64 (int_x86_avx512_mask_rndscale_pd_512 (v8f64 VR512:$src1),
3844 imm:$src2, (v8f64 VR512:$src1), (i8 -1),
3846 (VRNDSCALEPDZr VR512:$src1, imm:$src2)>;
// Scalar round-to-scale (VRNDSCALESS/SD): three-operand reg and mem forms
// with the rounding immediate in $src3.
3848 multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr,
3849 Operand x86memop, RegisterClass RC, Domain d> {
3850 let ExeDomain = d in {
3851 def r : AVX512AIi8<opc, MRMSrcReg,
3852 (outs RC:$dst), (ins RC:$src1, RC:$src2, i32i8imm:$src3),
3853 !strconcat(OpcodeStr,
3854 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3857 def m : AVX512AIi8<opc, MRMSrcMem,
3858 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i32i8imm:$src3),
3859 !strconcat(OpcodeStr,
3860 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3865 defm VRNDSCALESS : avx512_rndscale_scalar<0x0A, "vrndscaless", ssmem, FR32X,
3866 SSEPackedSingle>, EVEX_CD8<32, CD8VT1>;
3868 defm VRNDSCALESD : avx512_rndscale_scalar<0x0B, "vrndscalesd", sdmem, FR64X,
3869 SSEPackedDouble>, EVEX_CD8<64, CD8VT1>;
// Lower scalar f32 ffloor to VRNDSCALESS with imm 0x1 (round toward -inf).
// Wrap the source node in an explicit (f32 ...) result type for consistency
// with the sibling f32/f64 rounding patterns below.
3871 def : Pat<(f32 (ffloor FR32X:$src)),
3872 (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x1))>;
// Map the generic scalar FP rounding nodes onto VRNDSCALESS/SD.
// Immediates: 0x1 = floor, 0xC = nearbyint, 0x2 = ceil, 0x4 = rint
// (current rounding mode), 0x3 = trunc.
3873 def : Pat<(f64 (ffloor FR64X:$src)),
3874 (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x1))>;
3875 def : Pat<(f32 (fnearbyint FR32X:$src)),
3876 (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0xC))>;
3877 def : Pat<(f64 (fnearbyint FR64X:$src)),
3878 (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0xC))>;
3879 def : Pat<(f32 (fceil FR32X:$src)),
3880 (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x2))>;
3881 def : Pat<(f64 (fceil FR64X:$src)),
3882 (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x2))>;
3883 def : Pat<(f32 (frint FR32X:$src)),
3884 (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x4))>;
3885 def : Pat<(f64 (frint FR64X:$src)),
3886 (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x4))>;
3887 def : Pat<(f32 (ftrunc FR32X:$src)),
3888 (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x3))>;
3889 def : Pat<(f64 (ftrunc FR64X:$src)),
3890 (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x3))>;
// Packed rounding: map the generic vector nodes onto VRNDSCALEPS/PD with
// the same immediate encodings as the scalar patterns above.
3892 def : Pat<(v16f32 (ffloor VR512:$src)),
3893 (VRNDSCALEPSZr VR512:$src, (i32 0x1))>;
3894 def : Pat<(v16f32 (fnearbyint VR512:$src)),
3895 (VRNDSCALEPSZr VR512:$src, (i32 0xC))>;
3896 def : Pat<(v16f32 (fceil VR512:$src)),
3897 (VRNDSCALEPSZr VR512:$src, (i32 0x2))>;
3898 def : Pat<(v16f32 (frint VR512:$src)),
3899 (VRNDSCALEPSZr VR512:$src, (i32 0x4))>;
3900 def : Pat<(v16f32 (ftrunc VR512:$src)),
3901 (VRNDSCALEPSZr VR512:$src, (i32 0x3))>;
3903 def : Pat<(v8f64 (ffloor VR512:$src)),
3904 (VRNDSCALEPDZr VR512:$src, (i32 0x1))>;
3905 def : Pat<(v8f64 (fnearbyint VR512:$src)),
3906 (VRNDSCALEPDZr VR512:$src, (i32 0xC))>;
3907 def : Pat<(v8f64 (fceil VR512:$src)),
3908 (VRNDSCALEPDZr VR512:$src, (i32 0x2))>;
3909 def : Pat<(v8f64 (frint VR512:$src)),
3910 (VRNDSCALEPDZr VR512:$src, (i32 0x4))>;
3911 def : Pat<(v8f64 (ftrunc VR512:$src)),
3912 (VRNDSCALEPDZr VR512:$src, (i32 0x3))>;
3914 //-------------------------------------------------
3915 // Integer truncate and extend operations
3916 //-------------------------------------------------
// Vector narrowing (VPMOV*): reg-reg form plus masked (rrk), zero-masked
// (rrkz), store (mr), and masked-store (mrk) variants.
3918 multiclass avx512_trunc_sat<bits<8> opc, string OpcodeStr,
3919 RegisterClass dstRC, RegisterClass srcRC,
3920 RegisterClass KRC, X86MemOperand x86memop> {
3921 def rr : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
3923 !strconcat(OpcodeStr," \t{$src, $dst|$dst, $src}"),
3926 def rrk : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
3927 (ins KRC:$mask, srcRC:$src),
3928 !strconcat(OpcodeStr,
3929 " \t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
3932 def rrkz : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
3933 (ins KRC:$mask, srcRC:$src),
3934 !strconcat(OpcodeStr,
3935 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
3938 def mr : AVX512XS8I<opc, MRMDestMem, (outs), (ins x86memop:$dst, srcRC:$src),
3939 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
3942 def mrk : AVX512XS8I<opc, MRMDestMem, (outs),
3943 (ins x86memop:$dst, KRC:$mask, srcRC:$src),
3944 !strconcat(OpcodeStr, " \t{$src, $dst {${mask}}|${dst} {${mask}}, $src}"),
// All 512-bit narrowing conversions: plain (VPMOV*), signed-saturating
// (VPMOVS*), and unsigned-saturating (VPMOVUS*) for q->b/w/d and d->w/b.
3948 defm VPMOVQB : avx512_trunc_sat<0x32, "vpmovqb", VR128X, VR512, VK8WM,
3949 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
3950 defm VPMOVSQB : avx512_trunc_sat<0x22, "vpmovsqb", VR128X, VR512, VK8WM,
3951 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
3952 defm VPMOVUSQB : avx512_trunc_sat<0x12, "vpmovusqb", VR128X, VR512, VK8WM,
3953 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
3954 defm VPMOVQW : avx512_trunc_sat<0x34, "vpmovqw", VR128X, VR512, VK8WM,
3955 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
3956 defm VPMOVSQW : avx512_trunc_sat<0x24, "vpmovsqw", VR128X, VR512, VK8WM,
3957 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
3958 defm VPMOVUSQW : avx512_trunc_sat<0x14, "vpmovusqw", VR128X, VR512, VK8WM,
3959 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
3960 defm VPMOVQD : avx512_trunc_sat<0x35, "vpmovqd", VR256X, VR512, VK8WM,
3961 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
3962 defm VPMOVSQD : avx512_trunc_sat<0x25, "vpmovsqd", VR256X, VR512, VK8WM,
3963 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
3964 defm VPMOVUSQD : avx512_trunc_sat<0x15, "vpmovusqd", VR256X, VR512, VK8WM,
3965 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
3966 defm VPMOVDW : avx512_trunc_sat<0x33, "vpmovdw", VR256X, VR512, VK16WM,
3967 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
3968 defm VPMOVSDW : avx512_trunc_sat<0x23, "vpmovsdw", VR256X, VR512, VK16WM,
3969 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
3970 defm VPMOVUSDW : avx512_trunc_sat<0x13, "vpmovusdw", VR256X, VR512, VK16WM,
3971 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
3972 defm VPMOVDB : avx512_trunc_sat<0x31, "vpmovdb", VR128X, VR512, VK16WM,
3973 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
3974 defm VPMOVSDB : avx512_trunc_sat<0x21, "vpmovsdb", VR128X, VR512, VK16WM,
3975 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
3976 defm VPMOVUSDB : avx512_trunc_sat<0x11, "vpmovusdb", VR128X, VR512, VK16WM,
3977 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
// Select X86vtrunc (and masked X86vtruncm, zero-masking) to the VPMOV*
// register-to-register forms.
3979 def : Pat<(v16i8 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQBrr VR512:$src)>;
3980 def : Pat<(v8i16 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQWrr VR512:$src)>;
3981 def : Pat<(v16i16 (X86vtrunc (v16i32 VR512:$src))), (VPMOVDWrr VR512:$src)>;
3982 def : Pat<(v16i8 (X86vtrunc (v16i32 VR512:$src))), (VPMOVDBrr VR512:$src)>;
3983 def : Pat<(v8i32 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQDrr VR512:$src)>;
3985 def : Pat<(v16i8 (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
3986 (VPMOVDBrrkz VK16WM:$mask, VR512:$src)>;
3987 def : Pat<(v16i16 (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
3988 (VPMOVDWrrkz VK16WM:$mask, VR512:$src)>;
3989 def : Pat<(v8i16 (X86vtruncm VK8WM:$mask, (v8i64 VR512:$src))),
3990 (VPMOVQWrrkz VK8WM:$mask, VR512:$src)>;
3991 def : Pat<(v8i32 (X86vtruncm VK8WM:$mask, (v8i64 VR512:$src))),
3992 (VPMOVQDrrkz VK8WM:$mask, VR512:$src)>;
// Vector widening (zero/sign extension, OpNode = X86vzext/X86vsext):
// reg (rr) plus masked/zero-masked variants, and load-folding forms
// (rm/rmk/rmkz) that go through a bitconvert of the memory PatFrag.
3995 multiclass avx512_extend<bits<8> opc, string OpcodeStr, RegisterClass KRC,
3996 RegisterClass DstRC, RegisterClass SrcRC, SDNode OpNode,
3997 PatFrag mem_frag, X86MemOperand x86memop,
3998 ValueType OpVT, ValueType InVT> {
4000 def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
4002 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
4003 [(set DstRC:$dst, (OpVT (OpNode (InVT SrcRC:$src))))]>, EVEX;
4005 def rrk : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
4006 (ins KRC:$mask, SrcRC:$src),
4007 !strconcat(OpcodeStr, " \t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
4010 def rrkz : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
4011 (ins KRC:$mask, SrcRC:$src),
4012 !strconcat(OpcodeStr, " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
4015 let mayLoad = 1 in {
4016 def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
4017 (ins x86memop:$src),
4018 !strconcat(OpcodeStr," \t{$src, $dst|$dst, $src}"),
4020 (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))]>,
4023 def rmk : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
4024 (ins KRC:$mask, x86memop:$src),
4025 !strconcat(OpcodeStr," \t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
4029 def rmkz : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
4030 (ins KRC:$mask, x86memop:$src),
4031 !strconcat(OpcodeStr," \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
// 512-bit zero-extensions (X86vzext) ...
4037 defm VPMOVZXBDZ: avx512_extend<0x31, "vpmovzxbd", VK16WM, VR512, VR128X, X86vzext,
4038 memopv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
4040 defm VPMOVZXBQZ: avx512_extend<0x32, "vpmovzxbq", VK8WM, VR512, VR128X, X86vzext,
4041 memopv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
4043 defm VPMOVZXWDZ: avx512_extend<0x33, "vpmovzxwd", VK16WM, VR512, VR256X, X86vzext,
4044 memopv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
4045 EVEX_CD8<16, CD8VH>;
4046 defm VPMOVZXWQZ: avx512_extend<0x34, "vpmovzxwq", VK8WM, VR512, VR128X, X86vzext,
4047 memopv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
4048 EVEX_CD8<16, CD8VQ>;
4049 defm VPMOVZXDQZ: avx512_extend<0x35, "vpmovzxdq", VK8WM, VR512, VR256X, X86vzext,
4050 memopv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
4051 EVEX_CD8<32, CD8VH>;
// ... and the corresponding sign-extensions (X86vsext).
4053 defm VPMOVSXBDZ: avx512_extend<0x21, "vpmovsxbd", VK16WM, VR512, VR128X, X86vsext,
4054 memopv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
4056 defm VPMOVSXBQZ: avx512_extend<0x22, "vpmovsxbq", VK8WM, VR512, VR128X, X86vsext,
4057 memopv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
4059 defm VPMOVSXWDZ: avx512_extend<0x23, "vpmovsxwd", VK16WM, VR512, VR256X, X86vsext,
4060 memopv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
4061 EVEX_CD8<16, CD8VH>;
4062 defm VPMOVSXWQZ: avx512_extend<0x24, "vpmovsxwq", VK8WM, VR512, VR128X, X86vsext,
4063 memopv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
4064 EVEX_CD8<16, CD8VQ>;
4065 defm VPMOVSXDQZ: avx512_extend<0x25, "vpmovsxdq", VK8WM, VR512, VR256X, X86vsext,
4066 memopv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
4067 EVEX_CD8<32, CD8VH>;
4069 //===----------------------------------------------------------------------===//
4070 // GATHER - SCATTER Operations
// Gather: the destination is early-clobber, $src1 is tied to $dst
// (merge semantics), and the mask register is written back ($mask_wb).
4072 multiclass avx512_gather<bits<8> opc, string OpcodeStr, RegisterClass KRC,
4073 RegisterClass RC, X86MemOperand memop> {
4075 Constraints = "@earlyclobber $dst, $src1 = $dst, $mask = $mask_wb" in
4076 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst, KRC:$mask_wb),
4077 (ins RC:$src1, KRC:$mask, memop:$src2),
4078 !strconcat(OpcodeStr,
4079 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
// FP and integer gather instantiations (dword- vs. qword-indexed).
4083 let ExeDomain = SSEPackedDouble in {
4084 defm VGATHERDPDZ : avx512_gather<0x92, "vgatherdpd", VK8WM, VR512, vy64xmem>,
4085 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4086 defm VGATHERQPDZ : avx512_gather<0x93, "vgatherqpd", VK8WM, VR512, vz64mem>,
4087 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4090 let ExeDomain = SSEPackedSingle in {
4091 defm VGATHERDPSZ : avx512_gather<0x92, "vgatherdps", VK16WM, VR512, vz32mem>,
4092 EVEX_V512, EVEX_CD8<32, CD8VT1>;
4093 defm VGATHERQPSZ : avx512_gather<0x93, "vgatherqps", VK8WM, VR256X, vz64mem>,
4094 EVEX_V512, EVEX_CD8<32, CD8VT1>;
4097 defm VPGATHERDQZ : avx512_gather<0x90, "vpgatherdq", VK8WM, VR512, vy64xmem>,
4098 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4099 defm VPGATHERDDZ : avx512_gather<0x90, "vpgatherdd", VK16WM, VR512, vz32mem>,
4100 EVEX_V512, EVEX_CD8<32, CD8VT1>;
4102 defm VPGATHERQQZ : avx512_gather<0x91, "vpgatherqq", VK8WM, VR512, vz64mem>,
4103 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4104 defm VPGATHERQDZ : avx512_gather<0x91, "vpgatherqd", VK8WM, VR256X, vz64mem>,
4105 EVEX_V512, EVEX_CD8<32, CD8VT1>;
// Scatter: store form (mayStore) with mask writeback ($mask = $mask_wb).
4107 multiclass avx512_scatter<bits<8> opc, string OpcodeStr, RegisterClass KRC,
4108 RegisterClass RC, X86MemOperand memop> {
4109 let mayStore = 1, Constraints = "$mask = $mask_wb" in
4110 def mr : AVX5128I<opc, MRMDestMem, (outs KRC:$mask_wb),
4111 (ins memop:$dst, KRC:$mask, RC:$src2),
4112 !strconcat(OpcodeStr,
4113 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
// FP and integer scatter instantiations, mirroring the gather set.
4117 let ExeDomain = SSEPackedDouble in {
4118 defm VSCATTERDPDZ : avx512_scatter<0xA2, "vscatterdpd", VK8WM, VR512, vy64xmem>,
4119 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4120 defm VSCATTERQPDZ : avx512_scatter<0xA3, "vscatterqpd", VK8WM, VR512, vz64mem>,
4121 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4124 let ExeDomain = SSEPackedSingle in {
4125 defm VSCATTERDPSZ : avx512_scatter<0xA2, "vscatterdps", VK16WM, VR512, vz32mem>,
4126 EVEX_V512, EVEX_CD8<32, CD8VT1>;
4127 defm VSCATTERQPSZ : avx512_scatter<0xA3, "vscatterqps", VK8WM, VR256X, vz64mem>,
4128 EVEX_V512, EVEX_CD8<32, CD8VT1>;
4131 defm VPSCATTERDQZ : avx512_scatter<0xA0, "vpscatterdq", VK8WM, VR512, vy64xmem>,
4132 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4133 defm VPSCATTERDDZ : avx512_scatter<0xA0, "vpscatterdd", VK16WM, VR512, vz32mem>,
4134 EVEX_V512, EVEX_CD8<32, CD8VT1>;
4136 defm VPSCATTERQQZ : avx512_scatter<0xA1, "vpscatterqq", VK8WM, VR512, vz64mem>,
4137 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4138 defm VPSCATTERQDZ : avx512_scatter<0xA1, "vpscatterqd", VK8WM, VR256X, vz64mem>,
4139 EVEX_V512, EVEX_CD8<32, CD8VT1>;
// Gather/scatter prefetch hints (PFI feature): memory form only, no
// outputs; hasSideEffects keeps them from being dropped as dead.
4142 multiclass avx512_gather_scatter_prefetch<bits<8> opc, Format F, string OpcodeStr,
4143 RegisterClass KRC, X86MemOperand memop> {
4144 let Predicates = [HasPFI], hasSideEffects = 1 in
4145 def m : AVX5128I<opc, F, (outs), (ins KRC:$mask, memop:$src),
4146 !strconcat(OpcodeStr, " \t{$src {${mask}}|{${mask}}, $src}"),
// Hint level 0/1 x gather/scatter x {dps,qps,dpd,qpd}; the ModRM /reg
// field (MRM1m/MRM2m/MRM5m/MRM6m) selects the hint variant.
4150 defm VGATHERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dps",
4151 VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
4153 defm VGATHERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qps",
4154 VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
4156 defm VGATHERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dpd",
4157 VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
4159 defm VGATHERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qpd",
4160 VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4162 defm VGATHERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dps",
4163 VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
4165 defm VGATHERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qps",
4166 VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
4168 defm VGATHERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dpd",
4169 VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
4171 defm VGATHERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qpd",
4172 VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4174 defm VSCATTERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dps",
4175 VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
4177 defm VSCATTERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qps",
4178 VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
4180 defm VSCATTERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dpd",
4181 VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
4183 defm VSCATTERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qpd",
4184 VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4186 defm VSCATTERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dps",
4187 VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
4189 defm VSCATTERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qps",
4190 VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
4192 defm VSCATTERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dpd",
4193 VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
4195 defm VSCATTERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qpd",
4196 VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4197 //===----------------------------------------------------------------------===//
4198 // VSHUFPS - VSHUFPD Operations
// VSHUFPS/VSHUFPD: reg-mem (rmi) and reg-reg (rri) forms selecting the
// X86Shufp node with an 8-bit control immediate.
4200 multiclass avx512_shufp<RegisterClass RC, X86MemOperand x86memop,
4201 ValueType vt, string OpcodeStr, PatFrag mem_frag,
4203 def rmi : AVX512PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
4204 (ins RC:$src1, x86memop:$src2, i8imm:$src3),
4205 !strconcat(OpcodeStr,
4206 " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4207 [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
4208 (i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
4209 EVEX_4V, Sched<[WriteShuffleLd, ReadAfterLd]>;
4210 def rri : AVX512PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
4211 (ins RC:$src1, RC:$src2, i8imm:$src3),
4212 !strconcat(OpcodeStr,
4213 " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4214 [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
4215 (i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
4216 EVEX_4V, Sched<[WriteShuffle]>;
// 512-bit shuffles; the extra patterns reuse the FP instructions for the
// same-width integer vector types.
4219 defm VSHUFPSZ : avx512_shufp<VR512, f512mem, v16f32, "vshufps", memopv16f32,
4220 SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
4221 defm VSHUFPDZ : avx512_shufp<VR512, f512mem, v8f64, "vshufpd", memopv8f64,
4222 SSEPackedDouble>, PD, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
4224 def : Pat<(v16i32 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))),
4225 (VSHUFPSZrri VR512:$src1, VR512:$src2, imm:$imm)>;
4226 def : Pat<(v16i32 (X86Shufp VR512:$src1,
4227 (memopv16i32 addr:$src2), (i8 imm:$imm))),
4228 (VSHUFPSZrmi VR512:$src1, addr:$src2, imm:$imm)>;
4230 def : Pat<(v8i64 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))),
4231 (VSHUFPDZrri VR512:$src1, VR512:$src2, imm:$imm)>;
4232 def : Pat<(v8i64 (X86Shufp VR512:$src1,
4233 (memopv8i64 addr:$src2), (i8 imm:$imm))),
4234 (VSHUFPDZrmi VR512:$src1, addr:$src2, imm:$imm)>;
// VALIGND/VALIGNQ (element-wise align with immediate); the X86PAlignr
// patterns swap the two sources when mapping to VALIGN*.
4236 multiclass avx512_alignr<string OpcodeStr, RegisterClass RC,
4237 X86MemOperand x86memop> {
4238 def rri : AVX512AIi8<0x03, MRMSrcReg, (outs RC:$dst),
4239 (ins RC:$src1, RC:$src2, i8imm:$src3),
4240 !strconcat(OpcodeStr,
4241 " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4244 def rmi : AVX512AIi8<0x03, MRMSrcMem, (outs RC:$dst),
4245 (ins RC:$src1, x86memop:$src2, i8imm:$src3),
4246 !strconcat(OpcodeStr,
4247 " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4250 defm VALIGND : avx512_alignr<"valignd", VR512, i512mem>,
4251 EVEX_V512, EVEX_CD8<32, CD8VF>;
4252 defm VALIGNQ : avx512_alignr<"valignq", VR512, i512mem>,
4253 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
4255 def : Pat<(v16f32 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
4256 (VALIGNDrri VR512:$src2, VR512:$src1, imm:$imm)>;
4257 def : Pat<(v8f64 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
4258 (VALIGNQrri VR512:$src2, VR512:$src1, imm:$imm)>;
4259 def : Pat<(v16i32 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
4260 (VALIGNDrri VR512:$src2, VR512:$src1, imm:$imm)>;
4261 def : Pat<(v8i64 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
4262 (VALIGNQrri VR512:$src2, VR512:$src1, imm:$imm)>;
4264 // Helper fragments to match sext vXi1 to vXiY.
// Arithmetic right shift by (elt_size - 1) broadcasts each sign bit,
// i.e. an all-ones/all-zeros lane mask.
4265 def v16i1sextv16i32 : PatLeaf<(v16i32 (X86vsrai VR512:$src, (i8 31)))>;
4266 def v8i1sextv8i64 : PatLeaf<(v8i64 (X86vsrai VR512:$src, (i8 63)))>;
// Absolute value (VPABSD/Q): reg, masked, zero-masked forms, memory forms,
// and broadcast-memory forms (rmb*) using the BrdcstStr suffix (e.g. {1to16}).
4268 multiclass avx512_vpabs<bits<8> opc, string OpcodeStr, ValueType OpVT,
4269 RegisterClass KRC, RegisterClass RC,
4270 X86MemOperand x86memop, X86MemOperand x86scalar_mop,
4272 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
4273 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
4275 def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
4276 !strconcat(OpcodeStr, " \t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
4278 def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
4279 !strconcat(OpcodeStr,
4280 " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
4282 let mayLoad = 1 in {
4283 def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4284 (ins x86memop:$src),
4285 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
4287 def rmk : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4288 (ins KRC:$mask, x86memop:$src),
4289 !strconcat(OpcodeStr,
4290 " \t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
4292 def rmkz : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4293 (ins KRC:$mask, x86memop:$src),
4294 !strconcat(OpcodeStr,
4295 " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
4297 def rmb : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4298 (ins x86scalar_mop:$src),
4299 !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
4300 ", $dst|$dst, ${src}", BrdcstStr, "}"),
4302 def rmbk : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4303 (ins KRC:$mask, x86scalar_mop:$src),
4304 !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
4305 ", $dst {${mask}}|$dst {${mask}}, ${src}", BrdcstStr, "}"),
4306 []>, EVEX, EVEX_B, EVEX_K;
4307 def rmbkz : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4308 (ins KRC:$mask, x86scalar_mop:$src),
4309 !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
4310 ", $dst {${mask}} {z}|$dst {${mask}} {z}, ${src}",
4312 []>, EVEX, EVEX_B, EVEX_KZ;
// 512-bit VPABSD/VPABSQ, plus patterns: the xor/add-with-sign-mask idiom
// is abs(x), and the unmasked form of the mask_pabs intrinsics selects
// the plain rr instruction.
4316 defm VPABSDZ : avx512_vpabs<0x1E, "vpabsd", v16i32, VK16WM, VR512,
4317 i512mem, i32mem, "{1to16}">, EVEX_V512,
4318 EVEX_CD8<32, CD8VF>;
4319 defm VPABSQZ : avx512_vpabs<0x1F, "vpabsq", v8i64, VK8WM, VR512,
4320 i512mem, i64mem, "{1to8}">, EVEX_V512, VEX_W,
4321 EVEX_CD8<64, CD8VF>;
4324 (bc_v16i32 (v16i1sextv16i32)),
4325 (bc_v16i32 (add (v16i32 VR512:$src), (v16i1sextv16i32)))),
4326 (VPABSDZrr VR512:$src)>;
4328 (bc_v8i64 (v8i1sextv8i64)),
4329 (bc_v8i64 (add (v8i64 VR512:$src), (v8i1sextv8i64)))),
4330 (VPABSQZrr VR512:$src)>;
4332 def : Pat<(v16i32 (int_x86_avx512_mask_pabs_d_512 (v16i32 VR512:$src),
4333 (v16i32 immAllZerosV), (i16 -1))),
4334 (VPABSDZrr VR512:$src)>;
4335 def : Pat<(v8i64 (int_x86_avx512_mask_pabs_q_512 (v8i64 VR512:$src),
4336 (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
4337 (VPABSQZrr VR512:$src)>;
// Shared shape for the CDI unary ops (VPCONFLICT*, VPLZCNT*): unmasked
// reg/mem/broadcast forms, zero-masked forms, and merge-masked forms
// (the latter tie $src1 = $dst).
4339 multiclass avx512_conflict<bits<8> opc, string OpcodeStr,
4340 RegisterClass RC, RegisterClass KRC,
4341 X86MemOperand x86memop,
4342 X86MemOperand x86scalar_mop, string BrdcstStr> {
4343 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
4345 !strconcat(OpcodeStr, " \t{$src, ${dst} |${dst}, $src}"),
4347 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4348 (ins x86memop:$src),
4349 !strconcat(OpcodeStr, " \t{$src, ${dst}|${dst}, $src}"),
4351 def rmb : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4352 (ins x86scalar_mop:$src),
4353 !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
4354 ", ${dst}|${dst}, ${src}", BrdcstStr, "}"),
4356 def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
4357 (ins KRC:$mask, RC:$src),
4358 !strconcat(OpcodeStr,
4359 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
4361 def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4362 (ins KRC:$mask, x86memop:$src),
4363 !strconcat(OpcodeStr,
4364 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
4366 def rmbkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4367 (ins KRC:$mask, x86scalar_mop:$src),
4368 !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
4369 ", ${dst} {${mask}} {z}|${dst} {${mask}} {z}, ${src}",
4371 []>, EVEX, EVEX_KZ, EVEX_B;
4373 let Constraints = "$src1 = $dst" in {
4374 def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
4375 (ins RC:$src1, KRC:$mask, RC:$src2),
4376 !strconcat(OpcodeStr,
4377 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
4379 def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4380 (ins RC:$src1, KRC:$mask, x86memop:$src2),
4381 !strconcat(OpcodeStr,
4382 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
4384 def rmbk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4385 (ins RC:$src1, KRC:$mask, x86scalar_mop:$src2),
4386 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
4387 ", ${dst} {${mask}}|${dst} {${mask}}, ${src2}", BrdcstStr, "}"),
4388 []>, EVEX, EVEX_K, EVEX_B;
// CDI instructions: VPCONFLICT (duplicate detection) ...
4392 let Predicates = [HasCDI] in {
4393 defm VPCONFLICTD : avx512_conflict<0xC4, "vpconflictd", VR512, VK16WM,
4394 i512mem, i32mem, "{1to16}">,
4395 EVEX_V512, EVEX_CD8<32, CD8VF>;
4398 defm VPCONFLICTQ : avx512_conflict<0xC4, "vpconflictq", VR512, VK8WM,
4399 i512mem, i64mem, "{1to8}">,
4400 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// Masked intrinsic forms: move the GR mask into a VK register and use
// the merge-masked rrk instruction.
4404 def : Pat<(int_x86_avx512_mask_conflict_d_512 VR512:$src2, VR512:$src1,
4406 (VPCONFLICTDrrk VR512:$src1,
4407 (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), VR512:$src2)>;
4409 def : Pat<(int_x86_avx512_mask_conflict_q_512 VR512:$src2, VR512:$src1,
4411 (VPCONFLICTQrrk VR512:$src1,
4412 (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), VR512:$src2)>;
// ... and VPLZCNT (leading-zero count), which also selects generic ctlz.
4414 let Predicates = [HasCDI] in {
4415 defm VPLZCNTD : avx512_conflict<0x44, "vplzcntd", VR512, VK16WM,
4416 i512mem, i32mem, "{1to16}">,
4417 EVEX_V512, EVEX_CD8<32, CD8VF>;
4420 defm VPLZCNTQ : avx512_conflict<0x44, "vplzcntq", VR512, VK8WM,
4421 i512mem, i64mem, "{1to8}">,
4422 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
4426 def : Pat<(int_x86_avx512_mask_lzcnt_d_512 VR512:$src2, VR512:$src1,
4428 (VPLZCNTDrrk VR512:$src1,
4429 (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), VR512:$src2)>;
4431 def : Pat<(int_x86_avx512_mask_lzcnt_q_512 VR512:$src2, VR512:$src1,
4433 (VPLZCNTQrrk VR512:$src1,
4434 (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), VR512:$src2)>;
4436 def : Pat<(v16i32 (ctlz (memopv16i32 addr:$src))),
4437 (VPLZCNTDrm addr:$src)>;
4438 def : Pat<(v16i32 (ctlz (v16i32 VR512:$src))),
4439 (VPLZCNTDrr VR512:$src)>;
4440 def : Pat<(v8i64 (ctlz (memopv8i64 addr:$src))),
4441 (VPLZCNTQrm addr:$src)>;
4442 def : Pat<(v8i64 (ctlz (v8i64 VR512:$src))),
4443 (VPLZCNTQrr VR512:$src)>;
// Storing an i1: both representations of true (-1 and 1) are stored as
// byte 1, false as byte 0; a VK1 mask value goes through VK16 and KMOVW.
4445 def : Pat<(store (i1 -1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
4446 def : Pat<(store (i1 1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
4447 def : Pat<(store (i1 0), addr:$dst), (MOV8mi addr:$dst, (i8 0))>;
4449 def : Pat<(store VK1:$src, addr:$dst),
4450 (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK1:$src, VK16))>;
// Truncating store fragment matching only i1 memory type.
4452 def truncstorei1 : PatFrag<(ops node:$val, node:$ptr),
4453 (truncstore node:$val, node:$ptr), [{
4454 return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
// Truncating an integer to i1 for storage keeps the full low byte.
4457 def : Pat<(truncstorei1 GR8:$src, addr:$dst),
4458 (MOV8mr addr:$dst, GR8:$src)>;