1 // Bitcasts between 512-bit vector types. Return the original type since
2 // no instruction is needed for the conversion
3 let Predicates = [HasAVX512] in {
4 def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
5 def : Pat<(v8f64 (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
6 def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
7 def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
8 def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))), (v16f32 VR512:$src)>;
9 def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))), (v16f32 VR512:$src)>;
10 def : Pat<(v8i64 (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
11 def : Pat<(v8i64 (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
12 def : Pat<(v8i64 (bitconvert (v8f64 VR512:$src))), (v8i64 VR512:$src)>;
13 def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
14 def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))), (v16i32 VR512:$src)>;
15 def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))), (v16i32 VR512:$src)>;
// FIXME(review): duplicate of the v8f64 <- v8i64 pattern earlier in this
// block (third pattern above); harmless, but one copy should be removed.
16 def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
// Bitcasts between 128-bit vector types (VR128X). As above, these are free:
// no instruction is needed for the conversion.
18 def : Pat<(v2i64 (bitconvert (v4i32 VR128X:$src))), (v2i64 VR128X:$src)>;
19 def : Pat<(v2i64 (bitconvert (v8i16 VR128X:$src))), (v2i64 VR128X:$src)>;
20 def : Pat<(v2i64 (bitconvert (v16i8 VR128X:$src))), (v2i64 VR128X:$src)>;
21 def : Pat<(v2i64 (bitconvert (v2f64 VR128X:$src))), (v2i64 VR128X:$src)>;
22 def : Pat<(v2i64 (bitconvert (v4f32 VR128X:$src))), (v2i64 VR128X:$src)>;
23 def : Pat<(v4i32 (bitconvert (v2i64 VR128X:$src))), (v4i32 VR128X:$src)>;
24 def : Pat<(v4i32 (bitconvert (v8i16 VR128X:$src))), (v4i32 VR128X:$src)>;
25 def : Pat<(v4i32 (bitconvert (v16i8 VR128X:$src))), (v4i32 VR128X:$src)>;
26 def : Pat<(v4i32 (bitconvert (v2f64 VR128X:$src))), (v4i32 VR128X:$src)>;
27 def : Pat<(v4i32 (bitconvert (v4f32 VR128X:$src))), (v4i32 VR128X:$src)>;
28 def : Pat<(v8i16 (bitconvert (v2i64 VR128X:$src))), (v8i16 VR128X:$src)>;
29 def : Pat<(v8i16 (bitconvert (v4i32 VR128X:$src))), (v8i16 VR128X:$src)>;
30 def : Pat<(v8i16 (bitconvert (v16i8 VR128X:$src))), (v8i16 VR128X:$src)>;
31 def : Pat<(v8i16 (bitconvert (v2f64 VR128X:$src))), (v8i16 VR128X:$src)>;
32 def : Pat<(v8i16 (bitconvert (v4f32 VR128X:$src))), (v8i16 VR128X:$src)>;
33 def : Pat<(v16i8 (bitconvert (v2i64 VR128X:$src))), (v16i8 VR128X:$src)>;
34 def : Pat<(v16i8 (bitconvert (v4i32 VR128X:$src))), (v16i8 VR128X:$src)>;
35 def : Pat<(v16i8 (bitconvert (v8i16 VR128X:$src))), (v16i8 VR128X:$src)>;
36 def : Pat<(v16i8 (bitconvert (v2f64 VR128X:$src))), (v16i8 VR128X:$src)>;
37 def : Pat<(v16i8 (bitconvert (v4f32 VR128X:$src))), (v16i8 VR128X:$src)>;
38 def : Pat<(v4f32 (bitconvert (v2i64 VR128X:$src))), (v4f32 VR128X:$src)>;
39 def : Pat<(v4f32 (bitconvert (v4i32 VR128X:$src))), (v4f32 VR128X:$src)>;
40 def : Pat<(v4f32 (bitconvert (v8i16 VR128X:$src))), (v4f32 VR128X:$src)>;
41 def : Pat<(v4f32 (bitconvert (v16i8 VR128X:$src))), (v4f32 VR128X:$src)>;
42 def : Pat<(v4f32 (bitconvert (v2f64 VR128X:$src))), (v4f32 VR128X:$src)>;
43 def : Pat<(v2f64 (bitconvert (v2i64 VR128X:$src))), (v2f64 VR128X:$src)>;
44 def : Pat<(v2f64 (bitconvert (v4i32 VR128X:$src))), (v2f64 VR128X:$src)>;
45 def : Pat<(v2f64 (bitconvert (v8i16 VR128X:$src))), (v2f64 VR128X:$src)>;
46 def : Pat<(v2f64 (bitconvert (v16i8 VR128X:$src))), (v2f64 VR128X:$src)>;
47 def : Pat<(v2f64 (bitconvert (v4f32 VR128X:$src))), (v2f64 VR128X:$src)>;
49 // Bitcasts between 256-bit vector types. Return the original type since
50 // no instruction is needed for the conversion
51 def : Pat<(v4f64 (bitconvert (v8f32 VR256X:$src))), (v4f64 VR256X:$src)>;
52 def : Pat<(v4f64 (bitconvert (v8i32 VR256X:$src))), (v4f64 VR256X:$src)>;
53 def : Pat<(v4f64 (bitconvert (v4i64 VR256X:$src))), (v4f64 VR256X:$src)>;
54 def : Pat<(v4f64 (bitconvert (v16i16 VR256X:$src))), (v4f64 VR256X:$src)>;
55 def : Pat<(v4f64 (bitconvert (v32i8 VR256X:$src))), (v4f64 VR256X:$src)>;
56 def : Pat<(v8f32 (bitconvert (v8i32 VR256X:$src))), (v8f32 VR256X:$src)>;
57 def : Pat<(v8f32 (bitconvert (v4i64 VR256X:$src))), (v8f32 VR256X:$src)>;
58 def : Pat<(v8f32 (bitconvert (v4f64 VR256X:$src))), (v8f32 VR256X:$src)>;
59 def : Pat<(v8f32 (bitconvert (v32i8 VR256X:$src))), (v8f32 VR256X:$src)>;
60 def : Pat<(v8f32 (bitconvert (v16i16 VR256X:$src))), (v8f32 VR256X:$src)>;
61 def : Pat<(v4i64 (bitconvert (v8f32 VR256X:$src))), (v4i64 VR256X:$src)>;
62 def : Pat<(v4i64 (bitconvert (v8i32 VR256X:$src))), (v4i64 VR256X:$src)>;
63 def : Pat<(v4i64 (bitconvert (v4f64 VR256X:$src))), (v4i64 VR256X:$src)>;
64 def : Pat<(v4i64 (bitconvert (v32i8 VR256X:$src))), (v4i64 VR256X:$src)>;
65 def : Pat<(v4i64 (bitconvert (v16i16 VR256X:$src))), (v4i64 VR256X:$src)>;
66 def : Pat<(v32i8 (bitconvert (v4f64 VR256X:$src))), (v32i8 VR256X:$src)>;
67 def : Pat<(v32i8 (bitconvert (v4i64 VR256X:$src))), (v32i8 VR256X:$src)>;
68 def : Pat<(v32i8 (bitconvert (v8f32 VR256X:$src))), (v32i8 VR256X:$src)>;
69 def : Pat<(v32i8 (bitconvert (v8i32 VR256X:$src))), (v32i8 VR256X:$src)>;
70 def : Pat<(v32i8 (bitconvert (v16i16 VR256X:$src))), (v32i8 VR256X:$src)>;
71 def : Pat<(v8i32 (bitconvert (v32i8 VR256X:$src))), (v8i32 VR256X:$src)>;
72 def : Pat<(v8i32 (bitconvert (v16i16 VR256X:$src))), (v8i32 VR256X:$src)>;
73 def : Pat<(v8i32 (bitconvert (v8f32 VR256X:$src))), (v8i32 VR256X:$src)>;
74 def : Pat<(v8i32 (bitconvert (v4i64 VR256X:$src))), (v8i32 VR256X:$src)>;
75 def : Pat<(v8i32 (bitconvert (v4f64 VR256X:$src))), (v8i32 VR256X:$src)>;
76 def : Pat<(v16i16 (bitconvert (v8f32 VR256X:$src))), (v16i16 VR256X:$src)>;
77 def : Pat<(v16i16 (bitconvert (v8i32 VR256X:$src))), (v16i16 VR256X:$src)>;
78 def : Pat<(v16i16 (bitconvert (v4i64 VR256X:$src))), (v16i16 VR256X:$src)>;
79 def : Pat<(v16i16 (bitconvert (v4f64 VR256X:$src))), (v16i16 VR256X:$src)>;
80 def : Pat<(v16i16 (bitconvert (v32i8 VR256X:$src))), (v16i16 VR256X:$src)>;
84 // AVX-512: VPXOR instruction writes zero to its upper part, it's safe build zeros.
87 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
88 isPseudo = 1, Predicates = [HasAVX512] in {
89 def AVX512_512_SET0 : I<0, Pseudo, (outs VR512:$dst), (ins), "",
90 [(set VR512:$dst, (v16f32 immAllZerosV))]>;
93 let Predicates = [HasAVX512] in {
// All-zero 512-bit vectors of the remaining element types reuse the single
// AVX512_512_SET0 pseudo (a zero register is type-agnostic).
94 def : Pat<(v8i64 immAllZerosV), (AVX512_512_SET0)>;
95 def : Pat<(v16i32 immAllZerosV), (AVX512_512_SET0)>;
96 def : Pat<(v8f64 immAllZerosV), (AVX512_512_SET0)>;
99 //===----------------------------------------------------------------------===//
100 // AVX-512 - VECTOR INSERT
103 let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
104 def VINSERTF32x4rr : AVX512AIi8<0x18, MRMSrcReg, (outs VR512:$dst),
105 (ins VR512:$src1, VR128X:$src2, i8imm:$src3),
106 "vinsertf32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
107 []>, EVEX_4V, EVEX_V512;
109 def VINSERTF32x4rm : AVX512AIi8<0x18, MRMSrcMem, (outs VR512:$dst),
110 (ins VR512:$src1, f128mem:$src2, i8imm:$src3),
111 "vinsertf32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
112 []>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VT4>;
115 // -- 64x4 fp form --
116 let hasSideEffects = 0, ExeDomain = SSEPackedDouble in {
117 def VINSERTF64x4rr : AVX512AIi8<0x1a, MRMSrcReg, (outs VR512:$dst),
118 (ins VR512:$src1, VR256X:$src2, i8imm:$src3),
119 "vinsertf64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
120 []>, EVEX_4V, EVEX_V512, VEX_W;
122 def VINSERTF64x4rm : AVX512AIi8<0x1a, MRMSrcMem, (outs VR512:$dst),
123 (ins VR512:$src1, i256mem:$src2, i8imm:$src3),
124 "vinsertf64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
125 []>, EVEX_4V, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
127 // -- 32x4 integer form --
128 let hasSideEffects = 0 in {
129 def VINSERTI32x4rr : AVX512AIi8<0x38, MRMSrcReg, (outs VR512:$dst),
130 (ins VR512:$src1, VR128X:$src2, i8imm:$src3),
131 "vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
132 []>, EVEX_4V, EVEX_V512;
134 def VINSERTI32x4rm : AVX512AIi8<0x38, MRMSrcMem, (outs VR512:$dst),
135 (ins VR512:$src1, i128mem:$src2, i8imm:$src3),
136 "vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
137 []>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VT4>;
141 let hasSideEffects = 0 in {
143 def VINSERTI64x4rr : AVX512AIi8<0x3a, MRMSrcReg, (outs VR512:$dst),
144 (ins VR512:$src1, VR256X:$src2, i8imm:$src3),
145 "vinserti64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
146 []>, EVEX_4V, EVEX_V512, VEX_W;
148 def VINSERTI64x4rm : AVX512AIi8<0x3a, MRMSrcMem, (outs VR512:$dst),
149 (ins VR512:$src1, i256mem:$src2, i8imm:$src3),
150 "vinserti64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
151 []>, EVEX_4V, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
154 def : Pat<(vinsert128_insert:$ins (v16f32 VR512:$src1), (v4f32 VR128X:$src2),
155 (iPTR imm)), (VINSERTF32x4rr VR512:$src1, VR128X:$src2,
156 (INSERT_get_vinsert128_imm VR512:$ins))>;
157 def : Pat<(vinsert128_insert:$ins (v8f64 VR512:$src1), (v2f64 VR128X:$src2),
158 (iPTR imm)), (VINSERTF32x4rr VR512:$src1, VR128X:$src2,
159 (INSERT_get_vinsert128_imm VR512:$ins))>;
160 def : Pat<(vinsert128_insert:$ins (v8i64 VR512:$src1), (v2i64 VR128X:$src2),
161 (iPTR imm)), (VINSERTI32x4rr VR512:$src1, VR128X:$src2,
162 (INSERT_get_vinsert128_imm VR512:$ins))>;
163 def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1), (v4i32 VR128X:$src2),
164 (iPTR imm)), (VINSERTI32x4rr VR512:$src1, VR128X:$src2,
165 (INSERT_get_vinsert128_imm VR512:$ins))>;
167 def : Pat<(vinsert128_insert:$ins (v16f32 VR512:$src1), (loadv4f32 addr:$src2),
168 (iPTR imm)), (VINSERTF32x4rm VR512:$src1, addr:$src2,
169 (INSERT_get_vinsert128_imm VR512:$ins))>;
170 def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1),
171 (bc_v4i32 (loadv2i64 addr:$src2)),
172 (iPTR imm)), (VINSERTI32x4rm VR512:$src1, addr:$src2,
173 (INSERT_get_vinsert128_imm VR512:$ins))>;
174 def : Pat<(vinsert128_insert:$ins (v8f64 VR512:$src1), (loadv2f64 addr:$src2),
175 (iPTR imm)), (VINSERTF32x4rm VR512:$src1, addr:$src2,
176 (INSERT_get_vinsert128_imm VR512:$ins))>;
177 def : Pat<(vinsert128_insert:$ins (v8i64 VR512:$src1), (loadv2i64 addr:$src2),
178 (iPTR imm)), (VINSERTI32x4rm VR512:$src1, addr:$src2,
179 (INSERT_get_vinsert128_imm VR512:$ins))>;
181 def : Pat<(vinsert256_insert:$ins (v16f32 VR512:$src1), (v8f32 VR256X:$src2),
182 (iPTR imm)), (VINSERTF64x4rr VR512:$src1, VR256X:$src2,
183 (INSERT_get_vinsert256_imm VR512:$ins))>;
184 def : Pat<(vinsert256_insert:$ins (v8f64 VR512:$src1), (v4f64 VR256X:$src2),
185 (iPTR imm)), (VINSERTF64x4rr VR512:$src1, VR256X:$src2,
186 (INSERT_get_vinsert256_imm VR512:$ins))>;
// 256-bit integer register-form inserts. The operands, the instruction
// (VINSERTI64x4rr) and the immediate extractor are all 256-bit forms, so the
// matched PatFrag must be vinsert256_insert (was vinsert128_insert — a
// copy-paste error from the 128-bit patterns above).
187 def : Pat<(vinsert256_insert:$ins (v8i64 VR512:$src1), (v4i64 VR256X:$src2),
188 (iPTR imm)), (VINSERTI64x4rr VR512:$src1, VR256X:$src2,
189 (INSERT_get_vinsert256_imm VR512:$ins))>;
190 def : Pat<(vinsert256_insert:$ins (v16i32 VR512:$src1), (v8i32 VR256X:$src2),
191 (iPTR imm)), (VINSERTI64x4rr VR512:$src1, VR256X:$src2,
192 (INSERT_get_vinsert256_imm VR512:$ins))>;
194 def : Pat<(vinsert256_insert:$ins (v16f32 VR512:$src1), (loadv8f32 addr:$src2),
195 (iPTR imm)), (VINSERTF64x4rm VR512:$src1, addr:$src2,
196 (INSERT_get_vinsert256_imm VR512:$ins))>;
197 def : Pat<(vinsert256_insert:$ins (v8f64 VR512:$src1), (loadv4f64 addr:$src2),
198 (iPTR imm)), (VINSERTF64x4rm VR512:$src1, addr:$src2,
199 (INSERT_get_vinsert256_imm VR512:$ins))>;
200 def : Pat<(vinsert256_insert:$ins (v8i64 VR512:$src1), (loadv4i64 addr:$src2),
201 (iPTR imm)), (VINSERTI64x4rm VR512:$src1, addr:$src2,
202 (INSERT_get_vinsert256_imm VR512:$ins))>;
203 def : Pat<(vinsert256_insert:$ins (v16i32 VR512:$src1),
204 (bc_v8i32 (loadv4i64 addr:$src2)),
205 (iPTR imm)), (VINSERTI64x4rm VR512:$src1, addr:$src2,
206 (INSERT_get_vinsert256_imm VR512:$ins))>;
208 // vinsertps - insert f32 to XMM
209 def VINSERTPSzrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
210 (ins VR128X:$src1, VR128X:$src2, u32u8imm:$src3),
211 "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
212 [(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
214 def VINSERTPSzrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
215 (ins VR128X:$src1, f32mem:$src2, u32u8imm:$src3),
216 "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
217 [(set VR128X:$dst, (X86insertps VR128X:$src1,
218 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
219 imm:$src3))]>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
221 //===----------------------------------------------------------------------===//
222 // AVX-512 VECTOR EXTRACT
224 let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
226 def VEXTRACTF32x4rr : AVX512AIi8<0x19, MRMDestReg, (outs VR128X:$dst),
227 (ins VR512:$src1, i8imm:$src2),
228 "vextractf32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
229 []>, EVEX, EVEX_V512;
230 def VEXTRACTF32x4mr : AVX512AIi8<0x19, MRMDestMem, (outs),
231 (ins f128mem:$dst, VR512:$src1, i8imm:$src2),
232 "vextractf32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
233 []>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VT4>;
236 def VEXTRACTF64x4rr : AVX512AIi8<0x1b, MRMDestReg, (outs VR256X:$dst),
237 (ins VR512:$src1, i8imm:$src2),
238 "vextractf64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
239 []>, EVEX, EVEX_V512, VEX_W;
241 def VEXTRACTF64x4mr : AVX512AIi8<0x1b, MRMDestMem, (outs),
242 (ins f256mem:$dst, VR512:$src1, i8imm:$src2),
243 "vextractf64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
244 []>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
247 let hasSideEffects = 0 in {
249 def VEXTRACTI32x4rr : AVX512AIi8<0x39, MRMDestReg, (outs VR128X:$dst),
250 (ins VR512:$src1, i8imm:$src2),
251 "vextracti32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
252 []>, EVEX, EVEX_V512;
253 def VEXTRACTI32x4mr : AVX512AIi8<0x39, MRMDestMem, (outs),
254 (ins i128mem:$dst, VR512:$src1, i8imm:$src2),
255 "vextracti32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
256 []>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VT4>;
259 def VEXTRACTI64x4rr : AVX512AIi8<0x3b, MRMDestReg, (outs VR256X:$dst),
260 (ins VR512:$src1, i8imm:$src2),
261 "vextracti64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
262 []>, EVEX, EVEX_V512, VEX_W;
264 def VEXTRACTI64x4mr : AVX512AIi8<0x3b, MRMDestMem, (outs),
265 (ins i256mem:$dst, VR512:$src1, i8imm:$src2),
266 "vextracti64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
267 []>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
270 def : Pat<(vextract128_extract:$ext (v16f32 VR512:$src1), (iPTR imm)),
271 (v4f32 (VEXTRACTF32x4rr VR512:$src1,
272 (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
// Extract a 128-bit lane of a v16i32. The source operand must be typed
// explicitly — VR512 maps to several vector types, so an untyped VR512:$src1
// leaves the pattern ambiguous (every sibling pattern here pins the type).
// Uses the FP-domain instruction, matching the v16f32 pattern above; the
// extracted bits are identical either way.
274 def : Pat<(vextract128_extract:$ext (v16i32 VR512:$src1), (iPTR imm)),
275 (v4i32 (VEXTRACTF32x4rr VR512:$src1,
276 (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
278 def : Pat<(vextract128_extract:$ext (v8f64 VR512:$src1), (iPTR imm)),
279 (v2f64 (VEXTRACTF32x4rr VR512:$src1,
280 (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
282 def : Pat<(vextract128_extract:$ext (v8i64 VR512:$src1), (iPTR imm)),
283 (v2i64 (VEXTRACTI32x4rr VR512:$src1,
284 (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
287 def : Pat<(vextract256_extract:$ext (v16f32 VR512:$src1), (iPTR imm)),
288 (v8f32 (VEXTRACTF64x4rr VR512:$src1,
289 (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
291 def : Pat<(vextract256_extract:$ext (v16i32 VR512:$src1), (iPTR imm)),
292 (v8i32 (VEXTRACTI64x4rr VR512:$src1,
293 (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
295 def : Pat<(vextract256_extract:$ext (v8f64 VR512:$src1), (iPTR imm)),
296 (v4f64 (VEXTRACTF64x4rr VR512:$src1,
297 (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
299 def : Pat<(vextract256_extract:$ext (v8i64 VR512:$src1), (iPTR imm)),
300 (v4i64 (VEXTRACTI64x4rr VR512:$src1,
301 (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
303 // A 256-bit subvector extract from the first 512-bit vector position
304 // is a subregister copy that needs no instruction.
305 def : Pat<(v8i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
306 (v8i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm))>;
307 def : Pat<(v8f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
308 (v8f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm))>;
309 def : Pat<(v4i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
310 (v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm))>;
311 def : Pat<(v4f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
312 (v4f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm))>;
// A 128-bit subvector extract from the first 512-bit vector position is
// likewise a subregister copy that needs no instruction.
315 def : Pat<(v4i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
316 (v4i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm))>;
317 def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
318 (v2i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm))>;
319 def : Pat<(v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
320 (v2f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;
321 def : Pat<(v4f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
322 (v4f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
325 // A 128-bit subvector insert to the first 512-bit vector position
326 // is a subregister copy that needs no instruction.
327 def : Pat<(insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)),
328 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
329 (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
331 def : Pat<(insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0)),
332 (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
333 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
335 def : Pat<(insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0)),
336 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
337 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
339 def : Pat<(insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0)),
340 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
341 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
344 def : Pat<(insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0)),
345 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
346 def : Pat<(insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0)),
347 (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
348 def : Pat<(insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0)),
349 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
350 def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
351 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
353 // vextractps - extract 32 bits from XMM
354 def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
355 (ins VR128X:$src1, u32u8imm:$src2),
356 "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
357 [(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
360 def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
361 (ins f32mem:$dst, VR128X:$src1, u32u8imm:$src2),
362 "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
363 [(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
364 addr:$dst)]>, EVEX, EVEX_CD8<32, CD8VT1>;
366 //===---------------------------------------------------------------------===//
369 multiclass avx512_fp_broadcast<bits<8> opc, string OpcodeStr,
370 RegisterClass DestRC,
371 RegisterClass SrcRC, X86MemOperand x86memop> {
372 def rr : AVX5128I<opc, MRMSrcReg, (outs DestRC:$dst), (ins SrcRC:$src),
373 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
375 def rm : AVX5128I<opc, MRMSrcMem, (outs DestRC:$dst), (ins x86memop:$src),
376 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),[]>, EVEX;
378 let ExeDomain = SSEPackedSingle in {
379 defm VBROADCASTSSZ : avx512_fp_broadcast<0x18, "vbroadcastss", VR512,
381 EVEX_V512, EVEX_CD8<32, CD8VT1>;
384 let ExeDomain = SSEPackedDouble in {
385 defm VBROADCASTSDZ : avx512_fp_broadcast<0x19, "vbroadcastsd", VR512,
387 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
390 def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))),
391 (VBROADCASTSSZrm addr:$src)>;
392 def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))),
393 (VBROADCASTSDZrm addr:$src)>;
395 def : Pat<(int_x86_avx512_vbroadcast_ss_512 addr:$src),
396 (VBROADCASTSSZrm addr:$src)>;
397 def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
398 (VBROADCASTSDZrm addr:$src)>;
400 multiclass avx512_int_broadcast_reg<bits<8> opc, string OpcodeStr,
401 RegisterClass SrcRC, RegisterClass KRC> {
402 def Zrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins SrcRC:$src),
403 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
404 []>, EVEX, EVEX_V512;
405 def Zkrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst),
406 (ins KRC:$mask, SrcRC:$src),
407 !strconcat(OpcodeStr,
408 " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
409 []>, EVEX, EVEX_V512, EVEX_KZ;
412 defm VPBROADCASTDr : avx512_int_broadcast_reg<0x7C, "vpbroadcastd", GR32, VK16WM>;
413 defm VPBROADCASTQr : avx512_int_broadcast_reg<0x7C, "vpbroadcastq", GR64, VK8WM>,
// NOTE(review): zero-extension of a mask register to a vector appears to be
// implemented as a zeroing-masked broadcast of the constant 1 — enabled lanes
// receive 1, disabled lanes are zeroed. Confirm against X86vzext semantics.
416 def : Pat <(v16i32 (X86vzext VK16WM:$mask)),
417 (VPBROADCASTDrZkrr VK16WM:$mask, (i32 (MOV32ri 0x1)))>;
419 def : Pat <(v8i64 (X86vzext VK8WM:$mask)),
420 (VPBROADCASTQrZkrr VK8WM:$mask, (i64 (MOV64ri 0x1)))>;
422 def : Pat<(v16i32 (X86VBroadcast (i32 GR32:$src))),
423 (VPBROADCASTDrZrr GR32:$src)>;
424 def : Pat<(v16i32 (X86VBroadcastm VK16WM:$mask, (i32 GR32:$src))),
425 (VPBROADCASTDrZkrr VK16WM:$mask, GR32:$src)>;
426 def : Pat<(v8i64 (X86VBroadcast (i64 GR64:$src))),
427 (VPBROADCASTQrZrr GR64:$src)>;
428 def : Pat<(v8i64 (X86VBroadcastm VK8WM:$mask, (i64 GR64:$src))),
429 (VPBROADCASTQrZkrr VK8WM:$mask, GR64:$src)>;
431 def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_i32_512 (i32 GR32:$src))),
432 (VPBROADCASTDrZrr GR32:$src)>;
433 def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_i64_512 (i64 GR64:$src))),
434 (VPBROADCASTQrZrr GR64:$src)>;
436 def : Pat<(v16i32 (int_x86_avx512_mask_pbroadcast_d_gpr_512 (i32 GR32:$src),
437 (v16i32 immAllZerosV), (i16 GR16:$mask))),
438 (VPBROADCASTDrZkrr (COPY_TO_REGCLASS GR16:$mask, VK16WM), GR32:$src)>;
439 def : Pat<(v8i64 (int_x86_avx512_mask_pbroadcast_q_gpr_512 (i64 GR64:$src),
440 (bc_v8i64 (v16i32 immAllZerosV)), (i8 GR8:$mask))),
441 (VPBROADCASTQrZkrr (COPY_TO_REGCLASS GR8:$mask, VK8WM), GR64:$src)>;
443 multiclass avx512_int_broadcast_rm<bits<8> opc, string OpcodeStr,
444 X86MemOperand x86memop, PatFrag ld_frag,
445 RegisterClass DstRC, ValueType OpVT, ValueType SrcVT,
447 def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins VR128X:$src),
448 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
450 (OpVT (X86VBroadcast (SrcVT VR128X:$src))))]>, EVEX;
451 def krr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins KRC:$mask,
453 !strconcat(OpcodeStr,
454 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
456 (OpVT (X86VBroadcastm KRC:$mask, (SrcVT VR128X:$src))))]>,
459 def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
460 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
462 (OpVT (X86VBroadcast (ld_frag addr:$src))))]>, EVEX;
463 def krm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins KRC:$mask,
465 !strconcat(OpcodeStr,
466 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
467 [(set DstRC:$dst, (OpVT (X86VBroadcastm KRC:$mask,
468 (ld_frag addr:$src))))]>, EVEX, EVEX_KZ;
472 defm VPBROADCASTDZ : avx512_int_broadcast_rm<0x58, "vpbroadcastd", i32mem,
473 loadi32, VR512, v16i32, v4i32, VK16WM>,
474 EVEX_V512, EVEX_CD8<32, CD8VT1>;
475 defm VPBROADCASTQZ : avx512_int_broadcast_rm<0x59, "vpbroadcastq", i64mem,
476 loadi64, VR512, v8i64, v2i64, VK8WM>, EVEX_V512, VEX_W,
477 EVEX_CD8<64, CD8VT1>;
479 multiclass avx512_int_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
480 X86MemOperand x86memop, PatFrag ld_frag,
483 def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins x86memop:$src),
484 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
486 def krm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins KRC:$mask,
488 !strconcat(OpcodeStr,
489 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
494 defm VBROADCASTI32X4 : avx512_int_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
495 i128mem, loadv2i64, VK16WM>,
496 EVEX_V512, EVEX_CD8<32, CD8VT4>;
497 defm VBROADCASTI64X4 : avx512_int_subvec_broadcast_rm<0x5b, "vbroadcasti64x4",
498 i256mem, loadv4i64, VK16WM>, VEX_W,
499 EVEX_V512, EVEX_CD8<64, CD8VT4>;
501 def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_512 (v4i32 VR128X:$src))),
502 (VPBROADCASTDZrr VR128X:$src)>;
503 def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_512 (v2i64 VR128X:$src))),
504 (VPBROADCASTQZrr VR128X:$src)>;
506 def : Pat<(v16f32 (X86VBroadcast (v4f32 VR128X:$src))),
507 (VBROADCASTSSZrr VR128X:$src)>;
508 def : Pat<(v8f64 (X86VBroadcast (v2f64 VR128X:$src))),
509 (VBROADCASTSDZrr VR128X:$src)>;
511 def : Pat<(v16f32 (int_x86_avx512_vbroadcast_ss_ps_512 (v4f32 VR128X:$src))),
512 (VBROADCASTSSZrr VR128X:$src)>;
513 def : Pat<(v8f64 (int_x86_avx512_vbroadcast_sd_pd_512 (v2f64 VR128X:$src))),
514 (VBROADCASTSDZrr VR128X:$src)>;
516 // Provide fallback in case the load node that is used in the patterns above
517 // is used by additional users, which prevents the pattern selection.
518 def : Pat<(v16f32 (X86VBroadcast FR32X:$src)),
519 (VBROADCASTSSZrr (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
520 def : Pat<(v8f64 (X86VBroadcast FR64X:$src)),
521 (VBROADCASTSDZrr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
524 let Predicates = [HasAVX512] in {
525 def : Pat<(v8i32 (X86VBroadcastm (v8i1 VK8WM:$mask), (loadi32 addr:$src))),
527 (v16i32 (VPBROADCASTDZkrm (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
528 addr:$src)), sub_ymm)>;
530 //===----------------------------------------------------------------------===//
531 // AVX-512 BROADCAST MASK TO VECTOR REGISTER
534 multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
535 RegisterClass DstRC, RegisterClass KRC,
536 ValueType OpVT, ValueType SrcVT> {
537 def rr : AVX512XS8I<opc, MRMDestReg, (outs DstRC:$dst), (ins KRC:$src),
538 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
542 let Predicates = [HasCDI] in {
543 defm VPBROADCASTMW2D : avx512_mask_broadcast<0x3A, "vpbroadcastmw2d", VR512,
544 VK16, v16i32, v16i1>, EVEX_V512;
545 defm VPBROADCASTMB2Q : avx512_mask_broadcast<0x2A, "vpbroadcastmb2q", VR512,
546 VK8, v8i64, v8i1>, EVEX_V512, VEX_W;
549 //===----------------------------------------------------------------------===//
552 // -- immediate form --
553 multiclass avx512_perm_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
554 SDNode OpNode, PatFrag mem_frag,
555 X86MemOperand x86memop, ValueType OpVT> {
556 def ri : AVX512AIi8<opc, MRMSrcReg, (outs RC:$dst),
557 (ins RC:$src1, i8imm:$src2),
558 !strconcat(OpcodeStr,
559 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
561 (OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
563 def mi : AVX512AIi8<opc, MRMSrcMem, (outs RC:$dst),
564 (ins x86memop:$src1, i8imm:$src2),
565 !strconcat(OpcodeStr,
566 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
568 (OpVT (OpNode (mem_frag addr:$src1),
569 (i8 imm:$src2))))]>, EVEX;
572 defm VPERMQZ : avx512_perm_imm<0x00, "vpermq", VR512, X86VPermi, memopv8i64,
573 i512mem, v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
574 let ExeDomain = SSEPackedDouble in
575 defm VPERMPDZ : avx512_perm_imm<0x01, "vpermpd", VR512, X86VPermi, memopv8f64,
576 f512mem, v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
578 // -- VPERM - register form --
579 multiclass avx512_perm<bits<8> opc, string OpcodeStr, RegisterClass RC,
580 PatFrag mem_frag, X86MemOperand x86memop, ValueType OpVT> {
582 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
583 (ins RC:$src1, RC:$src2),
584 !strconcat(OpcodeStr,
585 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
587 (OpVT (X86VPermv RC:$src1, RC:$src2)))]>, EVEX_4V;
589 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
590 (ins RC:$src1, x86memop:$src2),
591 !strconcat(OpcodeStr,
592 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
594 (OpVT (X86VPermv RC:$src1, (mem_frag addr:$src2))))]>,
598 defm VPERMDZ : avx512_perm<0x36, "vpermd", VR512, memopv16i32, i512mem,
599 v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
600 defm VPERMQZ : avx512_perm<0x36, "vpermq", VR512, memopv8i64, i512mem,
601 v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
602 let ExeDomain = SSEPackedSingle in
603 defm VPERMPSZ : avx512_perm<0x16, "vpermps", VR512, memopv16f32, f512mem,
604 v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
605 let ExeDomain = SSEPackedDouble in
606 defm VPERMPDZ : avx512_perm<0x16, "vpermpd", VR512, memopv8f64, f512mem,
607 v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
609 // -- VPERM2I - 3 source operands form --
610 multiclass avx512_perm_3src<bits<8> opc, string OpcodeStr, RegisterClass RC,
611 PatFrag mem_frag, X86MemOperand x86memop,
612 SDNode OpNode, ValueType OpVT> {
613 let Constraints = "$src1 = $dst" in {
614 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
615 (ins RC:$src1, RC:$src2, RC:$src3),
616 !strconcat(OpcodeStr,
617 " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
619 (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>,
622 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
623 (ins RC:$src1, RC:$src2, x86memop:$src3),
624 !strconcat(OpcodeStr,
625 " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
627 (OpVT (OpNode RC:$src1, RC:$src2,
628 (mem_frag addr:$src3))))]>, EVEX_4V;
631 defm VPERMI2D : avx512_perm_3src<0x76, "vpermi2d", VR512, memopv16i32, i512mem,
632 X86VPermiv3, v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
633 defm VPERMI2Q : avx512_perm_3src<0x76, "vpermi2q", VR512, memopv8i64, i512mem,
634 X86VPermiv3, v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
635 defm VPERMI2PS : avx512_perm_3src<0x77, "vpermi2ps", VR512, memopv16f32, i512mem,
636 X86VPermiv3, v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
637 defm VPERMI2PD : avx512_perm_3src<0x77, "vpermi2pd", VR512, memopv8f64, i512mem,
638 X86VPermiv3, v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
640 defm VPERMT2D : avx512_perm_3src<0x7E, "vpermt2d", VR512, memopv16i32, i512mem,
641 X86VPermv3, v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
642 defm VPERMT2Q : avx512_perm_3src<0x7E, "vpermt2q", VR512, memopv8i64, i512mem,
643 X86VPermv3, v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
644 defm VPERMT2PS : avx512_perm_3src<0x7F, "vpermt2ps", VR512, memopv16f32, i512mem,
645 X86VPermv3, v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
646 defm VPERMT2PD : avx512_perm_3src<0x7F, "vpermt2pd", VR512, memopv8f64, i512mem,
647 X86VPermv3, v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// Unmasked forms of the mask_vpermt_* intrinsics: an all-ones mask
// (i16/i8 -1, every lane enabled) selects the plain VPERMT2* instruction.
// Operand order: $src1 goes in the tied dest/source slot (the multiclass has
// Constraints = "$src1 = $dst") and the index vector $idx is second.
649 def : Pat<(v16f32 (int_x86_avx512_mask_vpermt_ps_512 (v16i32 VR512:$idx),
650 (v16f32 VR512:$src1), (v16f32 VR512:$src2), (i16 -1))),
651 (VPERMT2PSrr VR512:$src1, VR512:$idx, VR512:$src2)>;
653 def : Pat<(v16i32 (int_x86_avx512_mask_vpermt_d_512 (v16i32 VR512:$idx),
654 (v16i32 VR512:$src1), (v16i32 VR512:$src2), (i16 -1))),
655 (VPERMT2Drr VR512:$src1, VR512:$idx, VR512:$src2)>;
657 def : Pat<(v8f64 (int_x86_avx512_mask_vpermt_pd_512 (v8i64 VR512:$idx),
658 (v8f64 VR512:$src1), (v8f64 VR512:$src2), (i8 -1))),
659 (VPERMT2PDrr VR512:$src1, VR512:$idx, VR512:$src2)>;
661 def : Pat<(v8i64 (int_x86_avx512_mask_vpermt_q_512 (v8i64 VR512:$idx),
662 (v8i64 VR512:$src1), (v8i64 VR512:$src2), (i8 -1))),
663 (VPERMT2Qrr VR512:$src1, VR512:$idx, VR512:$src2)>;
664 //===----------------------------------------------------------------------===//
665 // AVX-512 - BLEND using mask
// avx512_blendmask - masked blend: dst = mask ? src2 : src1 (vselect under a
// k-register mask). Note the rr pattern deliberately swaps the operands so
// that elements selected by the mask come from $src2 and the rest from $src1.
667 multiclass avx512_blendmask<bits<8> opc, string OpcodeStr,
668 RegisterClass KRC, RegisterClass RC,
669 X86MemOperand x86memop, PatFrag mem_frag,
670 SDNode OpNode, ValueType vt> {
671 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
672 (ins KRC:$mask, RC:$src1, RC:$src2),
673 !strconcat(OpcodeStr,
674 " \t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
675 [(set RC:$dst, (OpNode KRC:$mask, (vt RC:$src2),
676 (vt RC:$src1)))]>, EVEX_4V, EVEX_K;
// Memory form; no pattern — selection happens through the rr form.
678 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
679 (ins KRC:$mask, RC:$src1, x86memop:$src2),
680 !strconcat(OpcodeStr,
681 " \t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
682 []>, EVEX_4V, EVEX_K;
// 512-bit FP blends: vblendmps (16 x f32, VK16WM mask) and
// vblendmpd (8 x f64, VK8WM mask).
685 let ExeDomain = SSEPackedSingle in
686 defm VBLENDMPSZ : avx512_blendmask<0x65, "vblendmps",
687 VK16WM, VR512, f512mem,
688 memopv16f32, vselect, v16f32>,
689 EVEX_CD8<32, CD8VF>, EVEX_V512;
690 let ExeDomain = SSEPackedDouble in
691 defm VBLENDMPDZ : avx512_blendmask<0x65, "vblendmpd",
692 VK8WM, VR512, f512mem,
693 memopv8f64, vselect, v8f64>,
694 VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
// FP blend intrinsics: the GR16/GR8 mask is moved into the write-mask
// register class before use.
696 def : Pat<(v16f32 (int_x86_avx512_mask_blend_ps_512 (v16f32 VR512:$src1),
697 (v16f32 VR512:$src2), (i16 GR16:$mask))),
698 (VBLENDMPSZrr (COPY_TO_REGCLASS GR16:$mask, VK16WM),
699 VR512:$src1, VR512:$src2)>;
701 def : Pat<(v8f64 (int_x86_avx512_mask_blend_pd_512 (v8f64 VR512:$src1),
702 (v8f64 VR512:$src2), (i8 GR8:$mask))),
703 (VBLENDMPDZrr (COPY_TO_REGCLASS GR8:$mask, VK8WM),
704 VR512:$src1, VR512:$src2)>;
// 512-bit integer blends: vpblendmd (16 x i32) and vpblendmq (8 x i64).
706 defm VPBLENDMDZ : avx512_blendmask<0x64, "vpblendmd",
707 VK16WM, VR512, f512mem,
708 memopv16i32, vselect, v16i32>,
709 EVEX_CD8<32, CD8VF>, EVEX_V512;
711 defm VPBLENDMQZ : avx512_blendmask<0x64, "vpblendmq",
712 VK8WM, VR512, f512mem,
713 memopv8i64, vselect, v8i64>,
714 VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
// Integer blend intrinsics. As with the FP blends above, the GR16/GR8 mask
// must be copied into the *write-mask* register classes (VK16WM/VK8WM) —
// the VPBLENDMDZ/VPBLENDMQZ mask operands are declared with those classes,
// and K0 is not a valid write mask.
716 def : Pat<(v16i32 (int_x86_avx512_mask_blend_d_512 (v16i32 VR512:$src1),
717 (v16i32 VR512:$src2), (i16 GR16:$mask))),
718 (VPBLENDMDZrr (COPY_TO_REGCLASS GR16:$mask, VK16WM),
719 VR512:$src1, VR512:$src2)>;
721 def : Pat<(v8i64 (int_x86_avx512_mask_blend_q_512 (v8i64 VR512:$src1),
722 (v8i64 VR512:$src2), (i8 GR8:$mask))),
723 (VPBLENDMQZrr (COPY_TO_REGCLASS GR8:$mask, VK8WM),
724 VR512:$src1, VR512:$src2)>;
// 256-bit vselect has no native AVX-512 instruction (no AVX512VL here), so
// widen both operands into a zmm register with SUBREG_TO_REG, blend with the
// 512-bit instruction, and extract the low ymm. Operand order is swapped to
// match the blendmask rr pattern above (mask picks $src2).
726 let Predicates = [HasAVX512] in {
727 def : Pat<(v8f32 (vselect (v8i1 VK8WM:$mask), (v8f32 VR256X:$src1),
728 (v8f32 VR256X:$src2))),
730 (v16f32 (VBLENDMPSZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
731 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
732 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
734 def : Pat<(v8i32 (vselect (v8i1 VK8WM:$mask), (v8i32 VR256X:$src1),
735 (v8i32 VR256X:$src2))),
737 (v16i32 (VPBLENDMDZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
738 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
739 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
741 //===----------------------------------------------------------------------===//
742 // Compare Instructions
743 //===----------------------------------------------------------------------===//
745 // avx512_cmp_scalar - AVX512 CMPSS and CMPSD
// avx512_cmp_scalar - AVX-512 VCMPSS/VCMPSD: compare two scalars with a
// condition code and produce a single mask bit in VK1.
746 multiclass avx512_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
747 Operand CC, SDNode OpNode, ValueType VT,
748 PatFrag ld_frag, string asm, string asm_alt> {
749 def rr : AVX512Ii8<0xC2, MRMSrcReg,
750 (outs VK1:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
751 [(set VK1:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))],
752 IIC_SSE_ALU_F32S_RR>, EVEX_4V;
753 def rm : AVX512Ii8<0xC2, MRMSrcMem,
754 (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
755 [(set VK1:$dst, (OpNode (VT RC:$src1),
756 (ld_frag addr:$src2), imm:$cc))], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
// Assembler-only aliases taking an explicit 8-bit immediate instead of a
// symbolic condition code; no patterns, no side effects.
757 let isAsmParserOnly = 1, hasSideEffects = 0 in {
758 def rri_alt : AVX512Ii8<0xC2, MRMSrcReg,
759 (outs VK1:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
760 asm_alt, [], IIC_SSE_ALU_F32S_RR>, EVEX_4V;
761 def rmi_alt : AVX512Ii8<0xC2, MRMSrcMem,
762 (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
763 asm_alt, [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
// Scalar single/double compare instantiations (X86cmpms node).
767 let Predicates = [HasAVX512] in {
768 defm VCMPSSZ : avx512_cmp_scalar<FR32X, f32mem, AVXCC, X86cmpms, f32, loadf32,
769 "vcmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
770 "vcmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
772 defm VCMPSDZ : avx512_cmp_scalar<FR64X, f64mem, AVXCC, X86cmpms, f64, loadf64,
773 "vcmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
774 "vcmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
// avx512_icmp_packed - packed integer equality/greater-than compares that
// write a mask register (VPCMPEQ*/VPCMPGT*).
778 multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, RegisterClass KRC,
779 RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
780 SDNode OpNode, ValueType vt> {
781 def rr : AVX512BI<opc, MRMSrcReg,
782 (outs KRC:$dst), (ins RC:$src1, RC:$src2),
783 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
784 [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2)))],
785 IIC_SSE_ALU_F32P_RR>, EVEX_4V;
786 def rm : AVX512BI<opc, MRMSrcMem,
787 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2),
788 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
789 [(set KRC:$dst, (OpNode (vt RC:$src1), (memop_frag addr:$src2)))],
790 IIC_SSE_ALU_F32P_RM>, EVEX_4V;
793 defm VPCMPEQDZ : avx512_icmp_packed<0x76, "vpcmpeqd", VK16, VR512, i512mem,
794 memopv16i32, X86pcmpeqm, v16i32>, EVEX_V512,
796 defm VPCMPEQQZ : avx512_icmp_packed<0x29, "vpcmpeqq", VK8, VR512, i512mem,
797 memopv8i64, X86pcmpeqm, v8i64>, T8PD, EVEX_V512,
798 VEX_W, EVEX_CD8<64, CD8VF>;
800 defm VPCMPGTDZ : avx512_icmp_packed<0x66, "vpcmpgtd", VK16, VR512, i512mem,
801 memopv16i32, X86pcmpgtm, v16i32>, EVEX_V512,
803 defm VPCMPGTQZ : avx512_icmp_packed<0x37, "vpcmpgtq", VK8, VR512, i512mem,
804 memopv8i64, X86pcmpgtm, v8i64>, T8PD, EVEX_V512,
805 VEX_W, EVEX_CD8<64, CD8VF>;
// 256-bit mask compares: widen the ymm operands to zmm, compare with the
// 512-bit instruction, then reinterpret the 16-bit mask result as VK8.
807 def : Pat<(v8i1 (X86pcmpgtm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
808 (COPY_TO_REGCLASS (VPCMPGTDZrr
809 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
810 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
812 def : Pat<(v8i1 (X86pcmpeqm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
813 (COPY_TO_REGCLASS (VPCMPEQDZrr
814 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
815 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
// avx512_icmp_cc - packed integer compares with an explicit condition code
// immediate (VPCMP{D,UD,Q,UQ}), producing a mask register.
817 multiclass avx512_icmp_cc<bits<8> opc, RegisterClass KRC,
818 RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
819 SDNode OpNode, ValueType vt, Operand CC, string Suffix> {
820 def rri : AVX512AIi8<opc, MRMSrcReg,
821 (outs KRC:$dst), (ins RC:$src1, RC:$src2, CC:$cc),
822 !strconcat("vpcmp${cc}", Suffix,
823 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
824 [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2), imm:$cc))],
825 IIC_SSE_ALU_F32P_RR>, EVEX_4V;
826 def rmi : AVX512AIi8<opc, MRMSrcMem,
827 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc),
828 !strconcat("vpcmp${cc}", Suffix,
829 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
830 [(set KRC:$dst, (OpNode (vt RC:$src1), (memop_frag addr:$src2),
831 imm:$cc))], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
832 // Accept explicit immediate argument form instead of comparison code.
833 let isAsmParserOnly = 1, hasSideEffects = 0 in {
834 def rri_alt : AVX512AIi8<opc, MRMSrcReg,
835 (outs KRC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
836 !strconcat("vpcmp", Suffix,
837 "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
838 [], IIC_SSE_ALU_F32P_RR>, EVEX_4V;
// Masked (zeroing-free, EVEX_K) assembler-only variants.
839 def rrik_alt : AVX512AIi8<opc, MRMSrcReg,
840 (outs KRC:$dst), (ins KRC:$mask, RC:$src1, RC:$src2, i8imm:$cc),
841 !strconcat("vpcmp", Suffix,
842 "\t{$cc, $src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2, $cc}"),
843 [], IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
844 def rmi_alt : AVX512AIi8<opc, MRMSrcMem,
845 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
846 !strconcat("vpcmp", Suffix,
847 "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
848 [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
849 def rmik_alt : AVX512AIi8<opc, MRMSrcMem,
850 (outs KRC:$dst), (ins KRC:$mask, RC:$src1, x86memop:$src2, i8imm:$cc),
851 !strconcat("vpcmp", Suffix,
852 "\t{$cc, $src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2, $cc}"),
853 [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
// Signed (X86cmpm, opcode 0x1F) and unsigned (X86cmpmu, opcode 0x1E)
// condition-code compares for dword and qword 512-bit vectors.
857 defm VPCMPDZ : avx512_icmp_cc<0x1F, VK16, VR512, i512mem, memopv16i32,
858 X86cmpm, v16i32, AVXCC, "d">,
859 EVEX_V512, EVEX_CD8<32, CD8VF>;
860 defm VPCMPUDZ : avx512_icmp_cc<0x1E, VK16, VR512, i512mem, memopv16i32,
861 X86cmpmu, v16i32, AVXCC, "ud">,
862 EVEX_V512, EVEX_CD8<32, CD8VF>;
864 defm VPCMPQZ : avx512_icmp_cc<0x1F, VK8, VR512, i512mem, memopv8i64,
865 X86cmpm, v8i64, AVXCC, "q">,
866 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
867 defm VPCMPUQZ : avx512_icmp_cc<0x1E, VK8, VR512, i512mem, memopv8i64,
868 X86cmpmu, v8i64, AVXCC, "uq">,
869 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
871 // avx512_cmp_packed - compare packed instructions
// Packed FP compares (VCMPPS/VCMPPD) writing a mask register: register,
// register-with-{sae} broadcast of rounding semantics, and memory forms.
872 multiclass avx512_cmp_packed<RegisterClass KRC, RegisterClass RC,
873 X86MemOperand x86memop, ValueType vt,
874 string suffix, Domain d> {
875 def rri : AVX512PIi8<0xC2, MRMSrcReg,
876 (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
877 !strconcat("vcmp${cc}", suffix,
878 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
879 [(set KRC:$dst, (X86cmpm (vt RC:$src1), (vt RC:$src2), imm:$cc))], d>;
// {sae} (suppress-all-exceptions) register form.
880 def rrib: AVX512PIi8<0xC2, MRMSrcReg,
881 (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
882 !strconcat("vcmp${cc}", suffix,
883 " \t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
885 def rmi : AVX512PIi8<0xC2, MRMSrcMem,
886 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, AVXCC:$cc),
887 !strconcat("vcmp${cc}", suffix,
888 " \t{$src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
890 (X86cmpm (vt RC:$src1), (memop addr:$src2), imm:$cc))], d>;
892 // Accept explicit immediate argument form instead of comparison code.
893 let isAsmParserOnly = 1, hasSideEffects = 0 in {
894 def rri_alt : AVX512PIi8<0xC2, MRMSrcReg,
895 (outs KRC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
896 !strconcat("vcmp", suffix,
897 " \t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
898 def rmi_alt : AVX512PIi8<0xC2, MRMSrcMem,
899 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
900 !strconcat("vcmp", suffix,
901 " \t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
// 512-bit packed FP compare instantiations.
905 defm VCMPPSZ : avx512_cmp_packed<VK16, VR512, f512mem, v16f32,
906 "ps", SSEPackedSingle>, PS, EVEX_4V, EVEX_V512,
908 defm VCMPPDZ : avx512_cmp_packed<VK8, VR512, f512mem, v8f64,
909 "pd", SSEPackedDouble>, PD, EVEX_4V, VEX_W, EVEX_V512,
// 256-bit compares lowered via widening to 512 bits and narrowing the
// resulting mask to VK8 (same trick as the integer compares above).
912 def : Pat<(v8i1 (X86cmpm (v8f32 VR256X:$src1), (v8f32 VR256X:$src2), imm:$cc)),
913 (COPY_TO_REGCLASS (VCMPPSZrri
914 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
915 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
917 def : Pat<(v8i1 (X86cmpm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
918 (COPY_TO_REGCLASS (VPCMPDZrri
919 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
920 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
922 def : Pat<(v8i1 (X86cmpmu (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
923 (COPY_TO_REGCLASS (VPCMPUDZrri
924 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
925 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
// Unmasked (-1 mask) cmp_ps/cmp_pd intrinsics. The first pair selects the
// {sae} (rrib) form; the second pair the normal (rri) form. The mask result
// is moved back to a GPR for the intrinsic's integer return.
928 def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
929 (v16f32 VR512:$src2), imm:$cc, (i16 -1),
931 (COPY_TO_REGCLASS (VCMPPSZrrib VR512:$src1, VR512:$src2,
932 (I8Imm imm:$cc)), GR16)>;
934 def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
935 (v8f64 VR512:$src2), imm:$cc, (i8 -1),
937 (COPY_TO_REGCLASS (VCMPPDZrrib VR512:$src1, VR512:$src2,
938 (I8Imm imm:$cc)), GR8)>;
940 def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
941 (v16f32 VR512:$src2), imm:$cc, (i16 -1),
943 (COPY_TO_REGCLASS (VCMPPSZrri VR512:$src1, VR512:$src2,
944 (I8Imm imm:$cc)), GR16)>;
946 def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
947 (v8f64 VR512:$src2), imm:$cc, (i8 -1),
949 (COPY_TO_REGCLASS (VCMPPDZrri VR512:$src1, VR512:$src2,
950 (I8Imm imm:$cc)), GR8)>;
952 // Mask register copy, including
953 // - copy between mask registers
954 // - load/store mask registers
955 // - copy from GPR to mask register and vice versa
// avx512_mask_mov - KMOV between mask registers and to/from memory.
// kk = mask<-mask, km = mask<-mem (with load pattern), mk = mem<-mask.
957 multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
958 string OpcodeStr, RegisterClass KRC,
959 ValueType vt, X86MemOperand x86memop> {
960 let hasSideEffects = 0 in {
961 def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
962 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
964 def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
965 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
966 [(set KRC:$dst, (vt (load addr:$src)))]>;
968 def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
969 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
// avx512_mask_mov_gpr - KMOV between mask registers and GPRs (no patterns).
973 multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
975 RegisterClass KRC, RegisterClass GRC> {
976 let hasSideEffects = 0 in {
977 def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src),
978 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
979 def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src),
980 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
// KMOVW instantiations: 16-bit mask moves (reg/reg, reg/mem, and GPR forms).
984 let Predicates = [HasAVX512] in {
985 defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem>,
987 defm KMOVW : avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
// Patterns lowering mask conversions, loads/stores, truncations and
// zero-extensions onto KMOVW plus GPR arithmetic.
991 let Predicates = [HasAVX512] in {
992 // GR16 from/to 16-bit mask
993 def : Pat<(v16i1 (bitconvert (i16 GR16:$src))),
994 (KMOVWkr (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit))>;
995 def : Pat<(i16 (bitconvert (v16i1 VK16:$src))),
996 (EXTRACT_SUBREG (KMOVWrk VK16:$src), sub_16bit)>;
998 // Store kreg in memory
999 def : Pat<(store (v16i1 VK16:$src), addr:$dst),
1000 (KMOVWmk addr:$dst, VK16:$src)>;
// v8i1 stores reuse the 16-bit KMOVW after a register-class copy.
1002 def : Pat<(store VK8:$src, addr:$dst),
1003 (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK8:$src, VK16))>;
1005 def : Pat<(i1 (load addr:$src)),
1006 (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK1)>;
1008 def : Pat<(v8i1 (load addr:$src)),
1009 (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK8)>;
// i1 truncation: mask off all but bit 0 in a GPR, then move to a k-register.
1011 def : Pat<(i1 (trunc (i32 GR32:$src))),
1012 (COPY_TO_REGCLASS (KMOVWkr (AND32ri $src, (i32 1))), VK1)>;
1014 def : Pat<(i1 (trunc (i8 GR8:$src))),
1016 (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit), (i32 1))),
1018 def : Pat<(i1 (trunc (i16 GR16:$src))),
1020 (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), $src, sub_16bit), (i32 1))),
// i1 zero-extension: move k-register to GPR and mask to a single bit.
1023 def : Pat<(i32 (zext VK1:$src)),
1024 (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1))>;
1025 def : Pat<(i8 (zext VK1:$src)),
1028 (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)), sub_8bit)>;
1029 def : Pat<(i64 (zext VK1:$src)),
1030 (AND64ri8 (SUBREG_TO_REG (i64 0),
1031 (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), sub_32bit), (i64 1))>;
1032 def : Pat<(i16 (zext VK1:$src)),
1034 (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)),
// A scalar i1 in a wider mask vector is just a register-class reinterpret.
1036 def : Pat<(v16i1 (scalar_to_vector VK1:$src)),
1037 (COPY_TO_REGCLASS VK1:$src, VK16)>;
1038 def : Pat<(v8i1 (scalar_to_vector VK1:$src)),
1039 (COPY_TO_REGCLASS VK1:$src, VK8)>;
1041 // With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
1042 let Predicates = [HasAVX512] in {
1043 // GR from/to 8-bit mask without native support
1044 def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
1046 (KMOVWkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
1048 def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
1050 (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
// Extracting element 0 of a mask vector is a plain reinterpret to VK1.
1053 def : Pat<(i1 (X86Vextract VK16:$src, (iPTR 0))),
1054 (COPY_TO_REGCLASS VK16:$src, VK1)>;
1055 def : Pat<(i1 (X86Vextract VK8:$src, (iPTR 0))),
1056 (COPY_TO_REGCLASS VK8:$src, VK1)>;
1060 // Mask unary operation
// avx512_mask_unop - single-source mask ops (KNOT); the _w variant
// instantiates the 16-bit form on VK16.
1062 multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr,
1063 RegisterClass KRC, SDPatternOperator OpNode> {
1064 let Predicates = [HasAVX512] in
1065 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
1066 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
1067 [(set KRC:$dst, (OpNode KRC:$src))]>;
1070 multiclass avx512_mask_unop_w<bits<8> opc, string OpcodeStr,
1071 SDPatternOperator OpNode> {
1072 defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
1076 defm KNOT : avx512_mask_unop_w<0x44, "knot", not>;
// Intrinsic form: move GR16 into VK16, run the W instruction, move back.
1078 multiclass avx512_mask_unop_int<string IntName, string InstName> {
1079 let Predicates = [HasAVX512] in
1080 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
1082 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
1083 (v16i1 (COPY_TO_REGCLASS GR16:$src, VK16))), GR16)>;
1085 defm : avx512_mask_unop_int<"knot", "KNOT">;
// xor-with-all-ones is a NOT; the v8i1 case is promoted through VK16.
1087 def : Pat<(xor VK16:$src1, (v16i1 immAllOnesV)), (KNOTWrr VK16:$src1)>;
1088 def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)),
1089 (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$src1, VK16)), VK8)>;
1091 // With AVX-512, 8-bit mask is promoted to 16-bit mask.
1092 def : Pat<(not VK8:$src),
1094 (KNOTWrr (COPY_TO_REGCLASS VK8:$src, VK16)), VK8)>;
1096 // Mask binary operation
1097 // - KAND, KANDN, KOR, KXNOR, KXOR
1098 multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr,
1099 RegisterClass KRC, SDPatternOperator OpNode> {
1100 let Predicates = [HasAVX512] in
1101 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
1102 !strconcat(OpcodeStr,
1103 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1104 [(set KRC:$dst, (OpNode KRC:$src1, KRC:$src2))]>;
1107 multiclass avx512_mask_binop_w<bits<8> opc, string OpcodeStr,
1108 SDPatternOperator OpNode> {
1109 defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
// Helper fragments for KANDN (and-not) and KXNOR (not-xor).
1113 def andn : PatFrag<(ops node:$i0, node:$i1), (and (not node:$i0), node:$i1)>;
1114 def xnor : PatFrag<(ops node:$i0, node:$i1), (not (xor node:$i0, node:$i1))>;
// All are commutable except KANDN (its first operand is negated).
1116 let isCommutable = 1 in {
1117 defm KAND : avx512_mask_binop_w<0x41, "kand", and>;
1118 let isCommutable = 0 in
1119 defm KANDN : avx512_mask_binop_w<0x42, "kandn", andn>;
1120 defm KOR : avx512_mask_binop_w<0x45, "kor", or>;
1121 defm KXNOR : avx512_mask_binop_w<0x46, "kxnor", xnor>;
1122 defm KXOR : avx512_mask_binop_w<0x47, "kxor", xor>;
// Single-bit (VK1) logic is performed through VK16 and narrowed back.
1125 def : Pat<(xor VK1:$src1, VK1:$src2),
1126 (COPY_TO_REGCLASS (KXORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1127 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
1129 def : Pat<(or VK1:$src1, VK1:$src2),
1130 (COPY_TO_REGCLASS (KORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1131 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
1133 def : Pat<(and VK1:$src1, VK1:$src2),
1134 (COPY_TO_REGCLASS (KANDWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1135 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
// Intrinsic form of the mask binops: GR16 operands are moved into VK16,
// the W instruction is executed, and the result moved back to GR16.
1137 multiclass avx512_mask_binop_int<string IntName, string InstName> {
1138 let Predicates = [HasAVX512] in
1139 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
1140 (i16 GR16:$src1), (i16 GR16:$src2)),
1141 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
1142 (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
1143 (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
1146 defm : avx512_mask_binop_int<"kand", "KAND">;
1147 defm : avx512_mask_binop_int<"kandn", "KANDN">;
1148 defm : avx512_mask_binop_int<"kor", "KOR">;
1149 defm : avx512_mask_binop_int<"kxnor", "KXNOR">;
1150 defm : avx512_mask_binop_int<"kxor", "KXOR">;
1152 // With AVX-512, 8-bit mask is promoted to 16-bit mask.
// v8i1 binops: widen both sources to VK16, apply the W instruction,
// narrow the result back to VK8.
1153 multiclass avx512_binop_pat<SDPatternOperator OpNode, Instruction Inst> {
1154 let Predicates = [HasAVX512] in
1155 def : Pat<(OpNode VK8:$src1, VK8:$src2),
1157 (Inst (COPY_TO_REGCLASS VK8:$src1, VK16),
1158 (COPY_TO_REGCLASS VK8:$src2, VK16)), VK8)>;
1161 defm : avx512_binop_pat<and, KANDWrr>;
1162 defm : avx512_binop_pat<andn, KANDNWrr>;
1163 defm : avx512_binop_pat<or, KORWrr>;
1164 defm : avx512_binop_pat<xnor, KXNORWrr>;
1165 defm : avx512_binop_pat<xor, KXORWrr>;
// avx512_mask_unpck - KUNPCKBW: interleave two mask registers.
1168 multiclass avx512_mask_unpck<bits<8> opc, string OpcodeStr,
1169 RegisterClass KRC> {
1170 let Predicates = [HasAVX512] in
1171 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
1172 !strconcat(OpcodeStr,
1173 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1176 multiclass avx512_mask_unpck_bw<bits<8> opc, string OpcodeStr> {
1177 defm BW : avx512_mask_unpck<opc, !strconcat(OpcodeStr, "bw"), VK16>,
1181 defm KUNPCK : avx512_mask_unpck_bw<0x4b, "kunpck">;
// concat_vectors(src1, src2): src2 goes first in the instruction operands.
1182 def : Pat<(v16i1 (concat_vectors (v8i1 VK8:$src1), (v8i1 VK8:$src2))),
1183 (KUNPCKBWrr (COPY_TO_REGCLASS VK8:$src2, VK16),
1184 (COPY_TO_REGCLASS VK8:$src1, VK16))>;
// GR16-based intrinsic form of the unpack.
1187 multiclass avx512_mask_unpck_int<string IntName, string InstName> {
1188 let Predicates = [HasAVX512] in
1189 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_bw")
1190 (i16 GR16:$src1), (i16 GR16:$src2)),
1191 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"BWrr")
1192 (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
1193 (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
1195 defm : avx512_mask_unpck_int<"kunpck", "KUNPCK">;
// avx512_mask_testop - KORTEST: OR two masks and set EFLAGS, no register
// result (outs is empty; Defs = [EFLAGS]).
1198 multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
1200 let Predicates = [HasAVX512], Defs = [EFLAGS] in
1201 def rr : I<opc, MRMSrcReg, (outs), (ins KRC:$src1, KRC:$src2),
1202 !strconcat(OpcodeStr, " \t{$src2, $src1|$src1, $src2}"),
1203 [(set EFLAGS, (OpNode KRC:$src1, KRC:$src2))]>;
1206 multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1207 defm W : avx512_mask_testop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
1211 defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>;
// Comparing a VK1 against 0: OR the bit with itself to set ZF via KORTEST.
1213 def : Pat<(X86cmp VK1:$src1, (i1 0)),
1214 (KORTESTWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1215 (COPY_TO_REGCLASS VK1:$src1, VK16))>;
// avx512_mask_shiftop - KSHIFTL/KSHIFTR by an 8-bit immediate.
1218 multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
1220 let Predicates = [HasAVX512] in
1221 def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, i8imm:$imm),
1222 !strconcat(OpcodeStr,
1223 " \t{$imm, $src, $dst|$dst, $src, $imm}"),
1224 [(set KRC:$dst, (OpNode KRC:$src, (i8 imm:$imm)))]>;
1227 multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
1229 defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
1233 defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", X86vshli>;
1234 defm KSHIFTR : avx512_mask_shiftop_w<0x30, 0x31, "kshiftr", X86vsrli>;
1236 // Mask setting all 0s or 1s
// KSET0/KSET1 are rematerializable pseudos expanded later; the B/W variants
// cover v8i1 and v16i1.
1237 multiclass avx512_mask_setop<RegisterClass KRC, ValueType VT, PatFrag Val> {
1238 let Predicates = [HasAVX512] in
1239 let isReMaterializable = 1, isAsCheapAsAMove = 1, isPseudo = 1 in
1240 def #NAME# : I<0, Pseudo, (outs KRC:$dst), (ins), "",
1241 [(set KRC:$dst, (VT Val))]>;
1244 multiclass avx512_mask_setop_w<PatFrag Val> {
1245 defm B : avx512_mask_setop<VK8, v8i1, Val>;
1246 defm W : avx512_mask_setop<VK16, v16i1, Val>;
1249 defm KSET0 : avx512_mask_setop_w<immAllZerosV>;
1250 defm KSET1 : avx512_mask_setop_w<immAllOnesV>;
1252 // With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
1253 let Predicates = [HasAVX512] in {
1254 def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
1255 def : Pat<(v8i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK8)>;
// Scalar i1 constants also come from the W pseudos (1 and -1 are the same
// single bit).
1256 def : Pat<(i1 0), (COPY_TO_REGCLASS (KSET0W), VK1)>;
1257 def : Pat<(i1 1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
1258 def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
// Mask subvector insert/extract: low half is a reinterpret; the high half
// requires a right shift by 8.
1260 def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
1261 (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;
1263 def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
1264 (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>;
1266 def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
1267 (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
// v8i1 shifts are widened to the 16-bit KSHIFT instructions.
1269 def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
1270 (v8i1 (COPY_TO_REGCLASS (KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;
1272 def : Pat<(v8i1 (X86vsrli VK8:$src, (i8 imm:$imm))),
1273 (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;
1274 //===----------------------------------------------------------------------===//
1275 // AVX-512 - Aligned and unaligned load and store
// avx512_load - 512-bit vector loads/moves with optional masking:
// rr (plain move), rrkz/rmkz (zeroing-masked), rrk/rmk (merging-masked,
// destination tied to $src1).
1278 multiclass avx512_load<bits<8> opc, RegisterClass RC, RegisterClass KRC,
1279 X86MemOperand x86memop, PatFrag ld_frag,
1280 string asm, Domain d,
1281 ValueType vt, bit IsReMaterializable = 1> {
1282 let hasSideEffects = 0 in {
1283 def rr : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
1284 !strconcat(asm, " \t{$src, $dst|$dst, $src}"), [], d>,
1286 def rrkz : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
1288 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
1289 [], d>, EVEX, EVEX_KZ;
// Only the plain memory form carries a selection pattern.
1291 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
1292 def rm : AVX512PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
1293 !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
1294 [(set (vt RC:$dst), (ld_frag addr:$src))], d>, EVEX;
1295 let Constraints = "$src1 = $dst", hasSideEffects = 0 in {
1296 def rrk : AVX512PI<opc, MRMSrcReg, (outs RC:$dst),
1297 (ins RC:$src1, KRC:$mask, RC:$src2),
1299 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"), [], d>,
1302 def rmk : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
1303 (ins RC:$src1, KRC:$mask, x86memop:$src2),
1305 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
1306 [], d>, EVEX, EVEX_K;
1309 def rmkz : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
1310 (ins KRC:$mask, x86memop:$src2),
1312 " \t{$src2, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src2}"),
1313 [], d>, EVEX, EVEX_KZ;
// avx512_store - matching store side: assembler-only reversed register
// forms, plus mr/mrk/mrkz memory stores (only mr has a pattern).
1316 multiclass avx512_store<bits<8> opc, RegisterClass RC, RegisterClass KRC,
1317 X86MemOperand x86memop, PatFrag store_frag,
1318 string asm, Domain d, ValueType vt> {
1319 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1320 def rr_alt : AVX512PI<opc, MRMDestReg, (outs RC:$dst), (ins RC:$src),
1321 !strconcat(asm, " \t{$src, $dst|$dst, $src}"), [], d>,
1323 let Constraints = "$src1 = $dst" in
1324 def alt_rrk : AVX512PI<opc, MRMDestReg, (outs RC:$dst),
1325 (ins RC:$src1, KRC:$mask, RC:$src2),
1327 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"), [], d>,
1329 def alt_rrkz : AVX512PI<opc, MRMDestReg, (outs RC:$dst),
1330 (ins KRC:$mask, RC:$src),
1332 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
1333 [], d>, EVEX, EVEX_KZ;
1335 let mayStore = 1 in {
1336 def mr : AVX512PI<opc, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
1337 !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
1338 [(store_frag (vt RC:$src), addr:$dst)], d>, EVEX;
1339 def mrk : AVX512PI<opc, MRMDestMem, (outs),
1340 (ins x86memop:$dst, KRC:$mask, RC:$src),
1342 " \t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
1343 [], d>, EVEX, EVEX_K;
1344 def mrkz : AVX512PI<opc, MRMDestMem, (outs),
1345 (ins x86memop:$dst, KRC:$mask, RC:$src),
1347 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
1348 [], d>, EVEX, EVEX_KZ;
// Aligned and unaligned 512-bit FP moves.
1352 defm VMOVAPSZ : avx512_load<0x28, VR512, VK16WM, f512mem, alignedloadv16f32,
1353 "vmovaps", SSEPackedSingle, v16f32>,
1354 avx512_store<0x29, VR512, VK16WM, f512mem, alignedstore512,
1355 "vmovaps", SSEPackedSingle, v16f32>,
1356 PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
1357 defm VMOVAPDZ : avx512_load<0x28, VR512, VK8WM, f512mem, alignedloadv8f64,
1358 "vmovapd", SSEPackedDouble, v8f64>,
1359 avx512_store<0x29, VR512, VK8WM, f512mem, alignedstore512,
1360 "vmovapd", SSEPackedDouble, v8f64>,
1361 PD, EVEX_V512, VEX_W,
1362 EVEX_CD8<64, CD8VF>;
1363 defm VMOVUPSZ : avx512_load<0x10, VR512, VK16WM, f512mem, loadv16f32,
1364 "vmovups", SSEPackedSingle, v16f32>,
1365 avx512_store<0x11, VR512, VK16WM, f512mem, store,
1366 "vmovups", SSEPackedSingle, v16f32>,
1367 PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
// IsReMaterializable = 0 for VMOVUPDZ loads.
1368 defm VMOVUPDZ : avx512_load<0x10, VR512, VK8WM, f512mem, loadv8f64,
1369 "vmovupd", SSEPackedDouble, v8f64, 0>,
1370 avx512_store<0x11, VR512, VK8WM, f512mem, store,
1371 "vmovupd", SSEPackedDouble, v8f64>,
1372 PD, EVEX_V512, VEX_W,
1373 EVEX_CD8<64, CD8VF>;
// Zero-masked FP load intrinsics (pass-through operand is all zeros) map to
// the rmkz (zeroing) forms.
1374 def: Pat<(v8f64 (int_x86_avx512_mask_loadu_pd_512 addr:$ptr,
1375 (bc_v8f64 (v16i32 immAllZerosV)), GR8:$mask)),
1376 (VMOVUPDZrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
1378 def: Pat<(v16f32 (int_x86_avx512_mask_loadu_ps_512 addr:$ptr,
1379 (bc_v16f32 (v16i32 immAllZerosV)), GR16:$mask)),
1380 (VMOVUPSZrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
// Masked FP store intrinsics map to the mrk forms.
1382 def: Pat<(int_x86_avx512_mask_storeu_ps_512 addr:$ptr, (v16f32 VR512:$src),
1384 (VMOVUPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
1386 def: Pat<(int_x86_avx512_mask_storeu_pd_512 addr:$ptr, (v8f64 VR512:$src),
1388 (VMOVUPDZmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
// Aligned and unaligned 512-bit integer moves.
1391 defm VMOVDQA32: avx512_load<0x6F, VR512, VK16WM, i512mem, alignedloadv16i32,
1392 "vmovdqa32", SSEPackedInt, v16i32>,
1393 avx512_store<0x7F, VR512, VK16WM, i512mem, alignedstore512,
1394 "vmovdqa32", SSEPackedInt, v16i32>,
1395 PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
1396 defm VMOVDQA64: avx512_load<0x6F, VR512, VK8WM, i512mem, alignedloadv8i64,
1397 "vmovdqa64", SSEPackedInt, v8i64>,
1398 avx512_store<0x7F, VR512, VK8WM, i512mem, alignedstore512,
1399 "vmovdqa64", SSEPackedInt, v8i64>,
1400 PD, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
1401 defm VMOVDQU32: avx512_load<0x6F, VR512, VK16WM, i512mem, load,
1402 "vmovdqu32", SSEPackedInt, v16i32>,
1403 avx512_store<0x7F, VR512, VK16WM, i512mem, store,
1404 "vmovdqu32", SSEPackedInt, v16i32>,
1405 XS, EVEX_V512, EVEX_CD8<32, CD8VF>;
1406 defm VMOVDQU64: avx512_load<0x6F, VR512, VK8WM, i512mem, load,
1407 "vmovdqu64", SSEPackedInt, v8i64>,
1408 avx512_store<0x7F, VR512, VK8WM, i512mem, store,
1409 "vmovdqu64", SSEPackedInt, v8i64>,
1410 XS, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
// Zero-masked integer load and masked store intrinsics, same scheme as FP.
1412 def: Pat<(v16i32 (int_x86_avx512_mask_loadu_d_512 addr:$ptr,
1413 (v16i32 immAllZerosV), GR16:$mask)),
1414 (VMOVDQU32rmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
1416 def: Pat<(v8i64 (int_x86_avx512_mask_loadu_q_512 addr:$ptr,
1417 (bc_v8i64 (v16i32 immAllZerosV)), GR8:$mask)),
1418 (VMOVDQU64rmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
1420 def: Pat<(int_x86_avx512_mask_storeu_d_512 addr:$ptr, (v16i32 VR512:$src),
1422 (VMOVDQU32mrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
1424 def: Pat<(int_x86_avx512_mask_storeu_q_512 addr:$ptr, (v8i64 VR512:$src),
1426 (VMOVDQU64mrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
// Prefer masked moves over blends for vselect-with-zero / vselect-merge:
// select(mask, x, 0) -> zeroing move; select(mask, 0, x) -> zeroing move
// with inverted mask; select(mask, x, y) -> merging move into y.
1429 let AddedComplexity = 20 in {
1430 def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src),
1431 (bc_v8i64 (v16i32 immAllZerosV)))),
1432 (VMOVDQU64rrkz VK8WM:$mask, VR512:$src)>;
1434 def : Pat<(v8i64 (vselect VK8WM:$mask, (bc_v8i64 (v16i32 immAllZerosV)),
1435 (v8i64 VR512:$src))),
1436 (VMOVDQU64rrkz (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$mask, VK16)),
1439 def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 VR512:$src),
1440 (v16i32 immAllZerosV))),
1441 (VMOVDQU32rrkz VK16WM:$mask, VR512:$src)>;
1443 def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 immAllZerosV),
1444 (v16i32 VR512:$src))),
1445 (VMOVDQU32rrkz (KNOTWrr VK16WM:$mask), VR512:$src)>;
// Merging selects: $src2 is the pass-through (tied destination), $src1 is
// copied where the mask is set.
1447 def : Pat<(v16f32 (vselect VK16WM:$mask, (v16f32 VR512:$src1),
1448 (v16f32 VR512:$src2))),
1449 (VMOVUPSZrrk VR512:$src2, VK16WM:$mask, VR512:$src1)>;
1450 def : Pat<(v8f64 (vselect VK8WM:$mask, (v8f64 VR512:$src1),
1451 (v8f64 VR512:$src2))),
1452 (VMOVUPDZrrk VR512:$src2, VK8WM:$mask, VR512:$src1)>;
1453 def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 VR512:$src1),
1454 (v16i32 VR512:$src2))),
1455 (VMOVDQU32rrk VR512:$src2, VK16WM:$mask, VR512:$src1)>;
1456 def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src1),
1457 (v8i64 VR512:$src2))),
1458 (VMOVDQU64rrk VR512:$src2, VK8WM:$mask, VR512:$src1)>;
// GPR <-> XMM scalar moves (vmovd / vmovq). The isCodeGenOnly variants
// move through the scalar FP register classes so bitconvert between
// GR64/FR64 can be selected without a real shuffle.
// NOTE(review): several records below appear to be missing interior
// lines (the original numbering jumps, e.g. 1463->1465 drops a
// "[(set VR128X:$dst," line and 1465->1467 drops the trailing EVEX
// attributes); reconcile against the upstream file before editing.
1460 // Move Int Doubleword to Packed Double Int
1462 def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
1463 "vmovd\t{$src, $dst|$dst, $src}",
1465 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
1467 def VMOVDI2PDIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src),
1468 "vmovd\t{$src, $dst|$dst, $src}",
1470 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
1471 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
1472 def VMOV64toPQIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$src),
1473 "vmovq\t{$src, $dst|$dst, $src}",
1475 (v2i64 (scalar_to_vector GR64:$src)))],
1476 IIC_SSE_MOVDQ>, EVEX, VEX_W, VEX_LIG;
1477 let isCodeGenOnly = 1 in {
1478 def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
1479 "vmovq\t{$src, $dst|$dst, $src}",
1480 [(set FR64:$dst, (bitconvert GR64:$src))],
1481 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
1482 def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
1483 "vmovq\t{$src, $dst|$dst, $src}",
1484 [(set GR64:$dst, (bitconvert FR64:$src))],
1485 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
1487 def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
1488 "vmovq\t{$src, $dst|$dst, $src}",
1489 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
1490 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteStore]>,
1491 EVEX_CD8<64, CD8VT1>;
// vmovd GR32/i32mem -> FR32X as a raw bitconvert; codegen-only because
// the assembler-visible form targets the vector register class instead.
1493 // Move Int Doubleword to Single Scalar
1495 let isCodeGenOnly = 1 in {
1496 def VMOVDI2SSZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src),
1497 "vmovd\t{$src, $dst|$dst, $src}",
1498 [(set FR32X:$dst, (bitconvert GR32:$src))],
1499 IIC_SSE_MOVDQ>, EVEX, VEX_LIG;
1501 def VMOVDI2SSZrm : AVX512BI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src),
1502 "vmovd\t{$src, $dst|$dst, $src}",
1503 [(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))],
1504 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
// vmovd XMM -> r/m32: extracts element 0 of the v4i32 source.
1507 // Move doubleword from xmm register to r/m32
1509 def VMOVPDI2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128X:$src),
1510 "vmovd\t{$src, $dst|$dst, $src}",
1511 [(set GR32:$dst, (vector_extract (v4i32 VR128X:$src),
1512 (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
1514 def VMOVPDI2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
1515 (ins i32mem:$dst, VR128X:$src),
1516 "vmovd\t{$src, $dst|$dst, $src}",
1517 [(store (i32 (vector_extract (v4i32 VR128X:$src),
1518 (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
1519 EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
// vmovq XMM -> r/m64: extracts element 0 of the v2i64 source. Requires
// 64-bit mode since the destination is a GR64.
1521 // Move quadword from xmm1 register to r/m64
1523 def VMOVPQIto64Zrr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128X:$src),
1524 "vmovq\t{$src, $dst|$dst, $src}",
1525 [(set GR64:$dst, (extractelt (v2i64 VR128X:$src),
1527 IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_LIG, VEX_W,
1528 Requires<[HasAVX512, In64BitMode]>;
1530 def VMOVPQIto64Zmr : I<0xD6, MRMDestMem, (outs),
1531 (ins i64mem:$dst, VR128X:$src),
1532 "vmovq\t{$src, $dst|$dst, $src}",
1533 [(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
1534 addr:$dst)], IIC_SSE_MOVDQ>,
1535 EVEX, PD, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>,
1536 Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>;
// vmovd FR32X -> GR32/i32mem as a raw bitconvert (codegen-only).
1538 // Move Scalar Single to Double Int
1540 let isCodeGenOnly = 1 in {
1541 def VMOVSS2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst),
1543 "vmovd\t{$src, $dst|$dst, $src}",
1544 [(set GR32:$dst, (bitconvert FR32X:$src))],
1545 IIC_SSE_MOVD_ToGP>, EVEX, VEX_LIG;
1546 def VMOVSS2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
1547 (ins i32mem:$dst, FR32X:$src),
1548 "vmovd\t{$src, $dst|$dst, $src}",
1549 [(store (i32 (bitconvert FR32X:$src)), addr:$dst)],
1550 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
// vmovq i64mem -> XMM, scalar_to_vector of the loaded quadword.
// NOTE(review): the operand-list line (originally between 1555 and 1557)
// appears to have been dropped by extraction — confirm the (ins …) dag
// against the upstream file.
1553 // Move Quadword Int to Packed Quadword Int
1555 def VMOVQI2PQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
1557 "vmovq\t{$src, $dst|$dst, $src}",
1559 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
1560 EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
1562 //===----------------------------------------------------------------------===//
1563 // AVX-512 MOVSS, MOVSD
1564 //===----------------------------------------------------------------------===//
// Template for VMOVSS/VMOVSD: register merge form (rr), masked merge
// form (rrk, pattern-less, tied to $dst), scalar load (rm) and scalar
// store (mr). hasSideEffects = 0 lets the pattern-less variants be
// treated as pure moves.
1566 multiclass avx512_move_scalar <string asm, RegisterClass RC,
1567 SDNode OpNode, ValueType vt,
1568 X86MemOperand x86memop, PatFrag mem_pat> {
1569 let hasSideEffects = 0 in {
1570 def rr : SI<0x10, MRMSrcReg, (outs VR128X:$dst), (ins VR128X:$src1, RC:$src2),
1571 !strconcat(asm, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1572 [(set VR128X:$dst, (vt (OpNode VR128X:$src1,
1573 (scalar_to_vector RC:$src2))))],
1574 IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG;
1575 let Constraints = "$src1 = $dst" in
1576 def rrk : SI<0x10, MRMSrcReg, (outs VR128X:$dst),
1577 (ins VR128X:$src1, VK1WM:$mask, RC:$src2, RC:$src3),
1579 " \t{$src3, $src2, $dst {${mask}}|$dst {${mask}}, $src2, $src3}"),
1580 [], IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG, EVEX_K;
1581 def rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
1582 !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
1583 [(set RC:$dst, (mem_pat addr:$src))], IIC_SSE_MOV_S_RM>,
1585 def mr: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
1586 !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
1587 [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
1589 } //hasSideEffects = 0
// Instantiate the scalar-move template for SS/SD, then select scalar
// X86select (mask-register select on f32/f64) onto the masked rrk form,
// round-tripping through VR128X with COPY_TO_REGCLASS.
1592 let ExeDomain = SSEPackedSingle in
1593 defm VMOVSSZ : avx512_move_scalar<"movss", FR32X, X86Movss, v4f32, f32mem,
1594 loadf32>, XS, EVEX_CD8<32, CD8VT1>;
1596 let ExeDomain = SSEPackedDouble in
1597 defm VMOVSDZ : avx512_move_scalar<"movsd", FR64X, X86Movsd, v2f64, f64mem,
1598 loadf64>, XD, VEX_W, EVEX_CD8<64, CD8VT1>;
1600 def : Pat<(f32 (X86select VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
1601 (COPY_TO_REGCLASS (VMOVSSZrrk (COPY_TO_REGCLASS FR32X:$src2, VR128X),
1602 VK1WM:$mask, (f32 (IMPLICIT_DEF)), FR32X:$src1), FR32X)>;
1604 def : Pat<(f64 (X86select VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
1605 (COPY_TO_REGCLASS (VMOVSDZrrk (COPY_TO_REGCLASS FR64X:$src2, VR128X),
1606 VK1WM:$mask, (f64 (IMPLICIT_DEF)), FR64X:$src1), FR64X)>;
// Reversed-operand (0x11 register) encodings exist only so the
// disassembler can round-trip them; no selection patterns.
1608 // For the disassembler
1609 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
1610 def VMOVSSZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
1611 (ins VR128X:$src1, FR32X:$src2),
1612 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
1614 XS, EVEX_4V, VEX_LIG;
1615 def VMOVSDZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
1616 (ins VR128X:$src1, FR64X:$src2),
1617 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
1619 XD, EVEX_4V, VEX_LIG, VEX_W;
// Pattern block mapping zero-extending scalar moves, scalar loads,
// extract-stores, and MOVSS/MOVSD-shaped shuffles onto the Z-suffixed
// (EVEX) instructions defined above. Mirrors the equivalent AVX/SSE
// pattern set; AddedComplexity orders these ahead of generic matches.
1622 let Predicates = [HasAVX512] in {
1623 let AddedComplexity = 15 in {
1624 // Move scalar to XMM zero-extended, zeroing a VR128X then do a
1625 // MOVS{S,D} to the lower bits.
1626 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32X:$src)))),
1627 (VMOVSSZrr (v4f32 (V_SET0)), FR32X:$src)>;
1628 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128X:$src))),
1629 (VMOVSSZrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
1630 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128X:$src))),
1631 (VMOVSSZrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
1632 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64X:$src)))),
1633 (VMOVSDZrr (v2f64 (V_SET0)), FR64X:$src)>;
1635 // Move low f32 and clear high bits.
1636 def : Pat<(v8f32 (X86vzmovl (v8f32 VR256X:$src))),
1637 (SUBREG_TO_REG (i32 0),
1638 (VMOVSSZrr (v4f32 (V_SET0)),
1639 (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)), sub_xmm)>;
1640 def : Pat<(v8i32 (X86vzmovl (v8i32 VR256X:$src))),
1641 (SUBREG_TO_REG (i32 0),
1642 (VMOVSSZrr (v4i32 (V_SET0)),
1643 (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)), sub_xmm)>;
1646 let AddedComplexity = 20 in {
1647 // MOVSSrm zeros the high parts of the register; represent this
1648 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
1649 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
1650 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
1651 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
1652 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
1653 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
1654 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
1656 // MOVSDrm zeros the high parts of the register; represent this
1657 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
1658 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
1659 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
1660 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
1661 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
1662 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
1663 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
1664 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
1665 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
1666 def : Pat<(v2f64 (X86vzload addr:$src)),
1667 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
1669 // Represent the same patterns above but in the form they appear for
1671 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
1672 (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
1673 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
1674 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
1675 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
1676 (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
1677 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
1678 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
1679 (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
1681 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
1682 (v4f32 (scalar_to_vector FR32X:$src)), (iPTR 0)))),
1683 (SUBREG_TO_REG (i32 0), (v4f32 (VMOVSSZrr (v4f32 (V_SET0)),
1684 FR32X:$src)), sub_xmm)>;
1685 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
1686 (v2f64 (scalar_to_vector FR64X:$src)), (iPTR 0)))),
1687 (SUBREG_TO_REG (i64 0), (v2f64 (VMOVSDZrr (v2f64 (V_SET0)),
1688 FR64X:$src)), sub_xmm)>;
1689 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
1690 (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
1691 (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;
1693 // Move low f64 and clear high bits.
1694 def : Pat<(v4f64 (X86vzmovl (v4f64 VR256X:$src))),
1695 (SUBREG_TO_REG (i32 0),
1696 (VMOVSDZrr (v2f64 (V_SET0)),
1697 (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)), sub_xmm)>;
1699 def : Pat<(v4i64 (X86vzmovl (v4i64 VR256X:$src))),
1700 (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (V_SET0)),
1701 (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)), sub_xmm)>;
1703 // Extract and store.
1704 def : Pat<(store (f32 (vector_extract (v4f32 VR128X:$src), (iPTR 0))),
1706 (VMOVSSZmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128X:$src), FR32X))>;
1707 def : Pat<(store (f64 (vector_extract (v2f64 VR128X:$src), (iPTR 0))),
1709 (VMOVSDZmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128X:$src), FR64X))>;
1711 // Shuffle with VMOVSS
1712 def : Pat<(v4i32 (X86Movss VR128X:$src1, VR128X:$src2)),
1713 (VMOVSSZrr (v4i32 VR128X:$src1),
1714 (COPY_TO_REGCLASS (v4i32 VR128X:$src2), FR32X))>;
1715 def : Pat<(v4f32 (X86Movss VR128X:$src1, VR128X:$src2)),
1716 (VMOVSSZrr (v4f32 VR128X:$src1),
1717 (COPY_TO_REGCLASS (v4f32 VR128X:$src2), FR32X))>;
1720 def : Pat<(v8i32 (X86Movss VR256X:$src1, VR256X:$src2)),
1721 (SUBREG_TO_REG (i32 0),
1722 (VMOVSSZrr (EXTRACT_SUBREG (v8i32 VR256X:$src1), sub_xmm),
1723 (EXTRACT_SUBREG (v8i32 VR256X:$src2), sub_xmm)),
1725 def : Pat<(v8f32 (X86Movss VR256X:$src1, VR256X:$src2)),
1726 (SUBREG_TO_REG (i32 0),
1727 (VMOVSSZrr (EXTRACT_SUBREG (v8f32 VR256X:$src1), sub_xmm),
1728 (EXTRACT_SUBREG (v8f32 VR256X:$src2), sub_xmm)),
1731 // Shuffle with VMOVSD
1732 def : Pat<(v2i64 (X86Movsd VR128X:$src1, VR128X:$src2)),
1733 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
1734 def : Pat<(v2f64 (X86Movsd VR128X:$src1, VR128X:$src2)),
1735 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
1736 def : Pat<(v4f32 (X86Movsd VR128X:$src1, VR128X:$src2)),
1737 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
1738 def : Pat<(v4i32 (X86Movsd VR128X:$src1, VR128X:$src2)),
1739 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
1742 def : Pat<(v4i64 (X86Movsd VR256X:$src1, VR256X:$src2)),
1743 (SUBREG_TO_REG (i32 0),
1744 (VMOVSDZrr (EXTRACT_SUBREG (v4i64 VR256X:$src1), sub_xmm),
1745 (EXTRACT_SUBREG (v4i64 VR256X:$src2), sub_xmm)),
1747 def : Pat<(v4f64 (X86Movsd VR256X:$src1, VR256X:$src2)),
1748 (SUBREG_TO_REG (i32 0),
1749 (VMOVSDZrr (EXTRACT_SUBREG (v4f64 VR256X:$src1), sub_xmm),
1750 (EXTRACT_SUBREG (v4f64 VR256X:$src2), sub_xmm)),
1753 def : Pat<(v2f64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
1754 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
1755 def : Pat<(v2i64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
1756 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
1757 def : Pat<(v4f32 (X86Movlps VR128X:$src1, VR128X:$src2)),
1758 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
1759 def : Pat<(v4i32 (X86Movlps VR128X:$src1, VR128X:$src2)),
1760 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
// vmovq xmm, xmm: keep the low quadword and zero the upper one
// (X86vzmovl on the register form).
1763 let AddedComplexity = 15 in
1764 def VMOVZPQILo2PQIZrr : AVX512XSI<0x7E, MRMSrcReg, (outs VR128X:$dst),
1766 "vmovq\t{$src, $dst|$dst, $src}",
1767 [(set VR128X:$dst, (v2i64 (X86vzmovl
1768 (v2i64 VR128X:$src))))],
1769 IIC_SSE_MOVQ_RR>, EVEX, VEX_W;
// vmovq i64mem -> xmm: load the low quadword and zero the upper one
// (X86vzmovl of the loaded v2i64).
// Fixes vs. previous text: (1) restore the missing operand-list dag —
// the record went straight from (outs …) to the asm string; (2) use
// EVEX_CD8<64, CD8VT1> (one 64-bit element) for the compressed-disp8
// tuple, matching the other 64-bit vmovq load VMOVQI2PQIZrm above,
// instead of the inconsistent EVEX_CD8<8, CD8VT8>.
1771 let AddedComplexity = 20 in
1772 def VMOVZPQILo2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
1773 (ins i64mem:$src),
1774 "vmovq\t{$src, $dst|$dst, $src}",
1775 [(set VR128X:$dst, (v2i64 (X86vzmovl
1776 (loadv2i64 addr:$src))))],
1777 IIC_SSE_MOVDQ>, EVEX, VEX_W,
1778 EVEX_CD8<64, CD8VT1>;
// Zero-extending movd/movq loads and register moves, plus insertion of
// a scalar into an all-zero or undef 512-bit vector, all mapped onto
// the 128-bit EVEX move instructions via SUBREG_TO_REG.
1780 let Predicates = [HasAVX512] in {
1781 // AVX 128-bit movd/movq instruction write zeros in the high 128-bit part.
1782 let AddedComplexity = 20 in {
1783 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
1784 (VMOVDI2PDIZrm addr:$src)>;
1785 def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
1786 (VMOV64toPQIZrr GR64:$src)>;
1787 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
1788 (VMOVDI2PDIZrr GR32:$src)>;
1790 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
1791 (VMOVDI2PDIZrm addr:$src)>;
1792 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
1793 (VMOVDI2PDIZrm addr:$src)>;
1794 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
1795 (VMOVZPQILo2PQIZrm addr:$src)>;
1796 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128X:$src))),
1797 (VMOVZPQILo2PQIZrr VR128X:$src)>;
1798 def : Pat<(v2i64 (X86vzload addr:$src)),
1799 (VMOVZPQILo2PQIZrm addr:$src)>;
1802 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
1803 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
1804 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
1805 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>;
1806 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
1807 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
1808 (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
1811 def : Pat<(v16i32 (X86Vinsert (v16i32 immAllZerosV), GR32:$src2, (iPTR 0))),
1812 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;
1814 def : Pat<(v8i64 (X86Vinsert (bc_v8i64 (v16i32 immAllZerosV)), GR64:$src2, (iPTR 0))),
1815 (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
1817 def : Pat<(v16i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
1818 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;
1820 def : Pat<(v8i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
1821 (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
1823 //===----------------------------------------------------------------------===//
1824 // AVX-512 - Non-temporals
1825 //===----------------------------------------------------------------------===//
// Non-temporal 512-bit load (vmovntdqa, via intrinsic) and aligned
// non-temporal stores. AddedComplexity = 400 makes the NT stores win
// over the ordinary aligned-store patterns.
1827 def VMOVNTDQAZrm : AVX5128I<0x2A, MRMSrcMem, (outs VR512:$dst),
1829 "vmovntdqa\t{$src, $dst|$dst, $src}",
1831 (int_x86_avx512_movntdqa addr:$src))]>,
1832 EVEX, EVEX_V512, EVEX_CD8<64, CD8VF>;
1834 // Prefer non-temporal over temporal versions
1835 let AddedComplexity = 400, SchedRW = [WriteStore] in {
1837 def VMOVNTPSZmr : AVX512PSI<0x2B, MRMDestMem, (outs),
1838 (ins f512mem:$dst, VR512:$src),
1839 "vmovntps\t{$src, $dst|$dst, $src}",
1840 [(alignednontemporalstore (v16f32 VR512:$src),
1843 EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
1845 def VMOVNTPDZmr : AVX512PDI<0x2B, MRMDestMem, (outs),
1846 (ins f512mem:$dst, VR512:$src),
1847 "vmovntpd\t{$src, $dst|$dst, $src}",
1848 [(alignednontemporalstore (v8f64 VR512:$src),
1851 EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
1854 def VMOVNTDQZmr : AVX512BI<0xE7, MRMDestMem, (outs),
1855 (ins i512mem:$dst, VR512:$src),
1856 "vmovntdq\t{$src, $dst|$dst, $src}",
1857 [(alignednontemporalstore (v8i64 VR512:$src),
1860 EVEX, EVEX_V512, EVEX_CD8<64, CD8VF>;
1863 //===----------------------------------------------------------------------===//
1864 // AVX-512 - Integer arithmetic
// Template for 512-bit integer binary ops. Emits the full EVEX matrix:
//   rr/rm      - unmasked reg-reg / reg-mem
//   rrk/rmk    - merge-masked (tied $src0 pass-through), EVEX_K
//   rrkz/rmkz  - zero-masked, EVEX_KZ
//   rmb/rmbk/rmbkz - embedded-broadcast memory forms, EVEX_B
// Masked forms are matched through vselect on the mask register, with
// immAllZerosV as the pass-through for the {z} variants.
1866 multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
1867 ValueType OpVT, RegisterClass KRC,
1868 RegisterClass RC, PatFrag memop_frag,
1869 X86MemOperand x86memop, PatFrag scalar_mfrag,
1870 X86MemOperand x86scalar_mop, string BrdcstStr,
1871 OpndItins itins, bit IsCommutable = 0> {
1872 let isCommutable = IsCommutable in
1873 def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
1874 (ins RC:$src1, RC:$src2),
1875 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1876 [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
1878 let AddedComplexity = 30 in {
1879 let Constraints = "$src0 = $dst" in
1880 def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
1881 (ins RC:$src0, KRC:$mask, RC:$src1, RC:$src2),
1882 !strconcat(OpcodeStr,
1883 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
1884 [(set RC:$dst, (OpVT (vselect KRC:$mask,
1885 (OpNode (OpVT RC:$src1), (OpVT RC:$src2)),
1887 itins.rr>, EVEX_4V, EVEX_K;
1888 def rrkz : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
1889 (ins KRC:$mask, RC:$src1, RC:$src2),
1890 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst {${mask}} {z}" ,
1891 "|$dst {${mask}} {z}, $src1, $src2}"),
1892 [(set RC:$dst, (OpVT (vselect KRC:$mask,
1893 (OpNode (OpVT RC:$src1), (OpVT RC:$src2)),
1894 (OpVT immAllZerosV))))],
1895 itins.rr>, EVEX_4V, EVEX_KZ;
1898 let mayLoad = 1 in {
1899 def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1900 (ins RC:$src1, x86memop:$src2),
1901 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1902 [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (memop_frag addr:$src2))))],
1904 let AddedComplexity = 30 in {
1905 let Constraints = "$src0 = $dst" in
1906 def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1907 (ins RC:$src0, KRC:$mask, RC:$src1, x86memop:$src2),
1908 !strconcat(OpcodeStr,
1909 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
1910 [(set RC:$dst, (OpVT (vselect KRC:$mask,
1911 (OpNode (OpVT RC:$src1), (memop_frag addr:$src2)),
1913 itins.rm>, EVEX_4V, EVEX_K;
1914 def rmkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1915 (ins KRC:$mask, RC:$src1, x86memop:$src2),
1916 !strconcat(OpcodeStr,
1917 " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
1918 [(set RC:$dst, (OpVT (vselect KRC:$mask,
1919 (OpNode (OpVT RC:$src1), (memop_frag addr:$src2)),
1920 (OpVT immAllZerosV))))],
1921 itins.rm>, EVEX_4V, EVEX_KZ;
1923 def rmb : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1924 (ins RC:$src1, x86scalar_mop:$src2),
1925 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
1926 ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
1927 [(set RC:$dst, (OpNode RC:$src1,
1928 (OpVT (X86VBroadcast (scalar_mfrag addr:$src2)))))],
1929 itins.rm>, EVEX_4V, EVEX_B;
1930 let AddedComplexity = 30 in {
1931 let Constraints = "$src0 = $dst" in
1932 def rmbk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1933 (ins RC:$src0, KRC:$mask, RC:$src1, x86scalar_mop:$src2),
1934 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
1935 ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}",
1937 [(set RC:$dst, (OpVT (vselect KRC:$mask,
1938 (OpNode (OpVT RC:$src1),
1939 (OpVT (X86VBroadcast (scalar_mfrag addr:$src2)))),
1941 itins.rm>, EVEX_4V, EVEX_B, EVEX_K;
1942 def rmbkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1943 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
1944 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
1945 ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
1947 [(set RC:$dst, (OpVT (vselect KRC:$mask,
1948 (OpNode (OpVT RC:$src1),
1949 (OpVT (X86VBroadcast (scalar_mfrag addr:$src2)))),
1950 (OpVT immAllZerosV))))],
1951 itins.rm>, EVEX_4V, EVEX_B, EVEX_KZ;
// Variant of avx512_binop_rm for ops whose destination element type
// differs from the source type (e.g. vpmuldq: v16i32 -> v8i64). Same
// rr/rm/broadcast/mask matrix, but the masked and broadcast forms carry
// no selection patterns; those are handled by explicit Pat defs below.
1956 multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, ValueType DstVT,
1957 ValueType SrcVT, RegisterClass KRC, RegisterClass RC,
1958 PatFrag memop_frag, X86MemOperand x86memop,
1959 PatFrag scalar_mfrag, X86MemOperand x86scalar_mop,
1960 string BrdcstStr, OpndItins itins, bit IsCommutable = 0> {
1961 let isCommutable = IsCommutable in
1963 def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
1964 (ins RC:$src1, RC:$src2),
1965 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1967 def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
1968 (ins KRC:$mask, RC:$src1, RC:$src2),
1969 !strconcat(OpcodeStr,
1970 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
1971 [], itins.rr>, EVEX_4V, EVEX_K;
1972 def rrkz : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
1973 (ins KRC:$mask, RC:$src1, RC:$src2),
1974 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst {${mask}} {z}" ,
1975 "|$dst {${mask}} {z}, $src1, $src2}"),
1976 [], itins.rr>, EVEX_4V, EVEX_KZ;
1978 let mayLoad = 1 in {
1979 def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1980 (ins RC:$src1, x86memop:$src2),
1981 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1983 def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1984 (ins KRC:$mask, RC:$src1, x86memop:$src2),
1985 !strconcat(OpcodeStr,
1986 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
1987 [], itins.rm>, EVEX_4V, EVEX_K;
1988 def rmkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1989 (ins KRC:$mask, RC:$src1, x86memop:$src2),
1990 !strconcat(OpcodeStr,
1991 " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
1992 [], itins.rm>, EVEX_4V, EVEX_KZ;
1993 def rmb : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1994 (ins RC:$src1, x86scalar_mop:$src2),
1995 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
1996 ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
1997 [], itins.rm>, EVEX_4V, EVEX_B;
1998 def rmbk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
1999 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
2000 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2001 ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}",
2003 [], itins.rm>, EVEX_4V, EVEX_B, EVEX_K;
2004 def rmbkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2005 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
2006 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2007 ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
2009 [], itins.rm>, EVEX_4V, EVEX_B, EVEX_KZ;
// 512-bit integer add/sub/mul instantiations, plus explicit patterns
// selecting the widening-multiply node and the unmasked intrinsic forms
// (mask = -1, zero pass-through) onto the plain rr instructions.
2013 defm VPADDDZ : avx512_binop_rm<0xFE, "vpaddd", add, v16i32, VK16WM, VR512,
2014 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2015 SSE_INTALU_ITINS_P, 1>, EVEX_V512, EVEX_CD8<32, CD8VF>;
2017 defm VPSUBDZ : avx512_binop_rm<0xFA, "vpsubd", sub, v16i32, VK16WM, VR512,
2018 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2019 SSE_INTALU_ITINS_P, 0>, EVEX_V512, EVEX_CD8<32, CD8VF>;
2021 defm VPMULLDZ : avx512_binop_rm<0x40, "vpmulld", mul, v16i32, VK16WM, VR512,
2022 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2023 SSE_INTALU_ITINS_P, 1>, T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2025 defm VPADDQZ : avx512_binop_rm<0xD4, "vpaddq", add, v8i64, VK8WM, VR512,
2026 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2027 SSE_INTALU_ITINS_P, 1>, EVEX_CD8<64, CD8VF>, EVEX_V512, VEX_W;
2029 defm VPSUBQZ : avx512_binop_rm<0xFB, "vpsubq", sub, v8i64, VK8WM, VR512,
2030 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2031 SSE_INTALU_ITINS_P, 0>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2033 defm VPMULDQZ : avx512_binop_rm2<0x28, "vpmuldq", v8i64, v16i32, VK8WM, VR512,
2034 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2035 SSE_INTALU_ITINS_P, 1>, T8PD, EVEX_V512,
2036 EVEX_CD8<64, CD8VF>, VEX_W;
2038 defm VPMULUDQZ : avx512_binop_rm2<0xF4, "vpmuludq", v8i64, v16i32, VK8WM, VR512,
2039 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2040 SSE_INTMUL_ITINS_P, 1>, EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
2042 def : Pat<(v8i64 (X86pmuludq (v16i32 VR512:$src1), (v16i32 VR512:$src2))),
2043 (VPMULUDQZrr VR512:$src1, VR512:$src2)>;
2045 def : Pat<(v8i64 (int_x86_avx512_mask_pmulu_dq_512 (v16i32 VR512:$src1),
2046 (v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2047 (VPMULUDQZrr VR512:$src1, VR512:$src2)>;
2048 def : Pat<(v8i64 (int_x86_avx512_mask_pmul_dq_512 (v16i32 VR512:$src1),
2049 (v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2050 (VPMULDQZrr VR512:$src1, VR512:$src2)>;
// Signed/unsigned 512-bit min/max instantiations, then patterns that
// select the corresponding unmasked mask-intrinsics (all-ones mask,
// zero pass-through) onto the plain rr instructions.
2052 defm VPMAXUDZ : avx512_binop_rm<0x3F, "vpmaxud", X86umax, v16i32, VK16WM, VR512,
2053 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2054 SSE_INTALU_ITINS_P, 1>,
2055 T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2056 defm VPMAXUQZ : avx512_binop_rm<0x3F, "vpmaxuq", X86umax, v8i64, VK8WM, VR512,
2057 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2058 SSE_INTALU_ITINS_P, 0>,
2059 T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2061 defm VPMAXSDZ : avx512_binop_rm<0x3D, "vpmaxsd", X86smax, v16i32, VK16WM, VR512,
2062 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2063 SSE_INTALU_ITINS_P, 1>,
2064 T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2065 defm VPMAXSQZ : avx512_binop_rm<0x3D, "vpmaxsq", X86smax, v8i64, VK8WM, VR512,
2066 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2067 SSE_INTALU_ITINS_P, 0>,
2068 T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2070 defm VPMINUDZ : avx512_binop_rm<0x3B, "vpminud", X86umin, v16i32, VK16WM, VR512,
2071 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2072 SSE_INTALU_ITINS_P, 1>,
2073 T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2074 defm VPMINUQZ : avx512_binop_rm<0x3B, "vpminuq", X86umin, v8i64, VK8WM, VR512,
2075 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2076 SSE_INTALU_ITINS_P, 0>,
2077 T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2079 defm VPMINSDZ : avx512_binop_rm<0x39, "vpminsd", X86smin, v16i32, VK16WM, VR512,
2080 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2081 SSE_INTALU_ITINS_P, 1>,
2082 T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2083 defm VPMINSQZ : avx512_binop_rm<0x39, "vpminsq", X86smin, v8i64, VK8WM, VR512,
2084 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2085 SSE_INTALU_ITINS_P, 0>,
2086 T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2088 def : Pat <(v16i32 (int_x86_avx512_mask_pmaxs_d_512 (v16i32 VR512:$src1),
2089 (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
2090 (VPMAXSDZrr VR512:$src1, VR512:$src2)>;
2091 def : Pat <(v16i32 (int_x86_avx512_mask_pmaxu_d_512 (v16i32 VR512:$src1),
2092 (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
2093 (VPMAXUDZrr VR512:$src1, VR512:$src2)>;
2094 def : Pat <(v8i64 (int_x86_avx512_mask_pmaxs_q_512 (v8i64 VR512:$src1),
2095 (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2096 (VPMAXSQZrr VR512:$src1, VR512:$src2)>;
2097 def : Pat <(v8i64 (int_x86_avx512_mask_pmaxu_q_512 (v8i64 VR512:$src1),
2098 (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2099 (VPMAXUQZrr VR512:$src1, VR512:$src2)>;
2100 def : Pat <(v16i32 (int_x86_avx512_mask_pmins_d_512 (v16i32 VR512:$src1),
2101 (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
2102 (VPMINSDZrr VR512:$src1, VR512:$src2)>;
2103 def : Pat <(v16i32 (int_x86_avx512_mask_pminu_d_512 (v16i32 VR512:$src1),
2104 (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
2105 (VPMINUDZrr VR512:$src1, VR512:$src2)>;
2106 def : Pat <(v8i64 (int_x86_avx512_mask_pmins_q_512 (v8i64 VR512:$src1),
2107 (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2108 (VPMINSQZrr VR512:$src1, VR512:$src2)>;
2109 def : Pat <(v8i64 (int_x86_avx512_mask_pminu_q_512 (v8i64 VR512:$src1),
2110 (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2111 (VPMINUQZrr VR512:$src1, VR512:$src2)>;
2112 //===----------------------------------------------------------------------===//
2113 // AVX-512 - Unpack Instructions
2114 //===----------------------------------------------------------------------===//
// FP unpack template (rr + rm) and its four 512-bit instantiations.
// NOTE(review): the single-precision defms pass memopv8f64 as the
// memory fragment — the rm pattern bitconverts the load, so the bytes
// are the same, but confirm this matches upstream intent.
2116 multiclass avx512_unpack_fp<bits<8> opc, SDNode OpNode, ValueType vt,
2117 PatFrag mem_frag, RegisterClass RC,
2118 X86MemOperand x86memop, string asm,
2120 def rr : AVX512PI<opc, MRMSrcReg,
2121 (outs RC:$dst), (ins RC:$src1, RC:$src2),
2123 (vt (OpNode RC:$src1, RC:$src2)))],
2125 def rm : AVX512PI<opc, MRMSrcMem,
2126 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2128 (vt (OpNode RC:$src1,
2129 (bitconvert (mem_frag addr:$src2)))))],
2133 defm VUNPCKHPSZ: avx512_unpack_fp<0x15, X86Unpckh, v16f32, memopv8f64,
2134 VR512, f512mem, "vunpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2135 SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
2136 defm VUNPCKHPDZ: avx512_unpack_fp<0x15, X86Unpckh, v8f64, memopv8f64,
2137 VR512, f512mem, "vunpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2138 SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2139 defm VUNPCKLPSZ: avx512_unpack_fp<0x14, X86Unpckl, v16f32, memopv8f64,
2140 VR512, f512mem, "vunpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2141 SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
2142 defm VUNPCKLPDZ: avx512_unpack_fp<0x14, X86Unpckl, v8f64, memopv8f64,
2143 VR512, f512mem, "vunpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2144 SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// Integer unpack template (rr + rm) and the four 512-bit dword/qword
// instantiations.
2146 multiclass avx512_unpack_int<bits<8> opc, string OpcodeStr, SDNode OpNode,
2147 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
2148 X86MemOperand x86memop> {
2149 def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2150 (ins RC:$src1, RC:$src2),
2151 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2152 [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
2153 IIC_SSE_UNPCK>, EVEX_4V;
2154 def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2155 (ins RC:$src1, x86memop:$src2),
2156 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2157 [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1),
2158 (bitconvert (memop_frag addr:$src2)))))],
2159 IIC_SSE_UNPCK>, EVEX_4V;
2161 defm VPUNPCKLDQZ : avx512_unpack_int<0x62, "vpunpckldq", X86Unpckl, v16i32,
2162 VR512, memopv16i32, i512mem>, EVEX_V512,
2163 EVEX_CD8<32, CD8VF>;
2164 defm VPUNPCKLQDQZ : avx512_unpack_int<0x6C, "vpunpcklqdq", X86Unpckl, v8i64,
2165 VR512, memopv8i64, i512mem>, EVEX_V512,
2166 VEX_W, EVEX_CD8<64, CD8VF>;
2167 defm VPUNPCKHDQZ : avx512_unpack_int<0x6A, "vpunpckhdq", X86Unpckh, v16i32,
2168 VR512, memopv16i32, i512mem>, EVEX_V512,
2169 EVEX_CD8<32, CD8VF>;
2170 defm VPUNPCKHQDQZ : avx512_unpack_int<0x6D, "vpunpckhqdq", X86Unpckh, v8i64,
2171 VR512, memopv8i64, i512mem>, EVEX_V512,
2172 VEX_W, EVEX_CD8<64, CD8VF>;
2173 //===----------------------------------------------------------------------===//
// Shuffle-with-immediate template (ri + mi) used by vpshufd and
// vpermilps/pd; the mi form shuffles directly from memory.
2177 multiclass avx512_pshuf_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
2178 SDNode OpNode, PatFrag mem_frag,
2179 X86MemOperand x86memop, ValueType OpVT> {
2180 def ri : AVX512Ii8<opc, MRMSrcReg, (outs RC:$dst),
2181 (ins RC:$src1, i8imm:$src2),
2182 !strconcat(OpcodeStr,
2183 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2185 (OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
2187 def mi : AVX512Ii8<opc, MRMSrcMem, (outs RC:$dst),
2188 (ins x86memop:$src1, i8imm:$src2),
2189 !strconcat(OpcodeStr,
2190 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2192 (OpVT (OpNode (mem_frag addr:$src1),
2193 (i8 imm:$src2))))]>, EVEX;
2196 defm VPSHUFDZ : avx512_pshuf_imm<0x70, "vpshufd", VR512, X86PShufd, memopv16i32,
2197 i512mem, v16i32>, PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2199 let ExeDomain = SSEPackedSingle in
2200 defm VPERMILPSZ : avx512_pshuf_imm<0x04, "vpermilps", VR512, X86VPermilp,
2201 memopv16f32, i512mem, v16f32>, TAPD, EVEX_V512,
2202 EVEX_CD8<32, CD8VF>;
// vpermilpd zmm, zmm/m512, imm8 — double-precision in-lane permute.
// Fix: this is a VEX_W / v8f64 (64-bit element) instruction, so the
// compressed-disp8 tuple must be EVEX_CD8<64, CD8VF>, consistent with
// the other 64-bit-element defms here (VUNPCKHPDZ, VPUNPCKLQDQZ, …);
// the previous EVEX_CD8<32, CD8VF> was inconsistent.
2203 let ExeDomain = SSEPackedDouble in
2204 defm VPERMILPDZ : avx512_pshuf_imm<0x05, "vpermilpd", VR512, X86VPermilp,
2205 memopv8f64, i512mem, v8f64>, TAPD, EVEX_V512,
2206 VEX_W, EVEX_CD8<64, CD8VF>;
// Reuse the FP permutes for integer vectors of the same element width.
2208 def : Pat<(v16i32 (X86VPermilp VR512:$src1, (i8 imm:$imm))),
2209 (VPERMILPSZri VR512:$src1, imm:$imm)>;
2210 def : Pat<(v8i64 (X86VPermilp VR512:$src1, (i8 imm:$imm))),
2211 (VPERMILPDZri VR512:$src1, imm:$imm)>;
2213 //===----------------------------------------------------------------------===//
2214 // AVX-512 Logical Instructions
2215 //===----------------------------------------------------------------------===//
// 512-bit bitwise logic: and/or/xor/andn in both dword and qword
// element widths (the width only matters for masking and broadcast).
2217 defm VPANDDZ : avx512_binop_rm<0xDB, "vpandd", and, v16i32, VK16WM, VR512, memopv16i32,
2218 i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
2219 EVEX_V512, EVEX_CD8<32, CD8VF>;
2220 defm VPANDQZ : avx512_binop_rm<0xDB, "vpandq", and, v8i64, VK8WM, VR512, memopv8i64,
2221 i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
2222 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2223 defm VPORDZ : avx512_binop_rm<0xEB, "vpord", or, v16i32, VK16WM, VR512, memopv16i32,
2224 i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
2225 EVEX_V512, EVEX_CD8<32, CD8VF>;
2226 defm VPORQZ : avx512_binop_rm<0xEB, "vporq", or, v8i64, VK8WM, VR512, memopv8i64,
2227 i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
2228 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2229 defm VPXORDZ : avx512_binop_rm<0xEF, "vpxord", xor, v16i32, VK16WM, VR512, memopv16i32,
2230 i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
2231 EVEX_V512, EVEX_CD8<32, CD8VF>;
2232 defm VPXORQZ : avx512_binop_rm<0xEF, "vpxorq", xor, v8i64, VK8WM, VR512, memopv8i64,
2233 i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
2234 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2235 defm VPANDNDZ : avx512_binop_rm<0xDF, "vpandnd", X86andnp, v16i32, VK16WM, VR512,
2236 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2237 SSE_BIT_ITINS_P, 0>, EVEX_V512, EVEX_CD8<32, CD8VF>;
2238 defm VPANDNQZ : avx512_binop_rm<0xDF, "vpandnq", X86andnp, v8i64, VK8WM, VR512,
2239 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2240 SSE_BIT_ITINS_P, 0>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2242 //===----------------------------------------------------------------------===//
2243 // AVX-512 FP arithmetic
2244 //===----------------------------------------------------------------------===//
// Scalar FP binop helper: instantiates the shared sse12_fp_scalar
// multiclass for both ss (f32, XS prefix) and sd (f64, XD + VEX_W)
// with AVX-512 register classes (FR32X/FR64X) and EVEX tuple-1 scaling.
// NOTE(review): the closing brace of this multiclass is not visible in
// this chunk.
2246 multiclass avx512_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
2248 defm SSZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"), OpNode, FR32X,
2249 f32mem, itins.s, 0>, XS, EVEX_4V, VEX_LIG,
2250 EVEX_CD8<32, CD8VT1>;
2251 defm SDZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"), OpNode, FR64X,
2252 f64mem, itins.d, 0>, XD, VEX_W, EVEX_4V, VEX_LIG,
2253 EVEX_CD8<64, CD8VT1>;
// Scalar add/mul/min/max are commutable; sub/div are not.
// (min/max use the X86-specific nodes, which encode SSE NaN semantics.)
2256 let isCommutable = 1 in {
2257 defm VADD : avx512_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>;
2258 defm VMUL : avx512_binop_s<0x59, "mul", fmul, SSE_ALU_ITINS_S>;
2259 defm VMIN : avx512_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>;
2260 defm VMAX : avx512_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>;
2262 let isCommutable = 0 in {
2263 defm VSUB : avx512_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>;
2264 defm VDIV : avx512_binop_s<0x5E, "div", fdiv, SSE_ALU_ITINS_S>;
// Packed FP binop multiclass. Emits, per instruction:
//   rr / rm     - plain register and memory forms with selection patterns
//   rmb         - embedded-broadcast memory form (EVEX_B), scalar load
//                 replicated via X86VBroadcast
//   rrk/rmk/rmbk   - merge-masked forms (EVEX_K), pattern-less ([])
//   rrkz/rmkz/rmbkz - zero-masked forms (EVEX_KZ), pattern-less ([])
// The masked variants exist for assembler/disassembler coverage only;
// they carry no ISel patterns here.
2267 multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
2269 RegisterClass RC, ValueType vt,
2270 X86MemOperand x86memop, PatFrag mem_frag,
2271 X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
2273 Domain d, OpndItins itins, bit commutable> {
2274 let isCommutable = commutable in {
2275 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2276 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2277 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>,
2280 def rrk: PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src1, RC:$src2),
2281 !strconcat(OpcodeStr,
2282 " \t{$src2, $src1, $dst {${mask}} |$dst {${mask}}, $src1, $src2}"),
2283 [], itins.rr, d>, EVEX_4V, EVEX_K;
2285 def rrkz: PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src1, RC:$src2),
2286 !strconcat(OpcodeStr,
2287 " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
2288 [], itins.rr, d>, EVEX_4V, EVEX_KZ;
2291 let mayLoad = 1 in {
2292 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2293 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2294 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
2295 itins.rm, d>, EVEX_4V;
// Embedded broadcast: BrdcstStr (e.g. "{1to16}") is spliced into the
// assembly string after the memory operand.
2297 def rmb : PI<opc, MRMSrcMem, (outs RC:$dst),
2298 (ins RC:$src1, x86scalar_mop:$src2),
2299 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2300 ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
2301 [(set RC:$dst, (OpNode RC:$src1,
2302 (vt (X86VBroadcast (scalar_mfrag addr:$src2)))))],
2303 itins.rm, d>, EVEX_4V, EVEX_B;
2305 def rmk : PI<opc, MRMSrcMem, (outs RC:$dst),
2306 (ins KRC:$mask, RC:$src1, x86memop:$src2), !strconcat(OpcodeStr,
2307 "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2308 [], itins.rm, d>, EVEX_4V, EVEX_K;
2310 def rmkz : PI<opc, MRMSrcMem, (outs RC:$dst),
2311 (ins KRC:$mask, RC:$src1, x86memop:$src2), !strconcat(OpcodeStr,
2312 "\t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
2313 [], itins.rm, d>, EVEX_4V, EVEX_KZ;
2315 def rmbk : PI<opc, MRMSrcMem, (outs RC:$dst),
2316 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2), !strconcat(OpcodeStr,
2317 " \t{${src2}", BrdcstStr,
2318 ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}", BrdcstStr, "}"),
2319 [], itins.rm, d>, EVEX_4V, EVEX_B, EVEX_K;
2321 def rmbkz : PI<opc, MRMSrcMem, (outs RC:$dst),
2322 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2), !strconcat(OpcodeStr,
2323 " \t{${src2}", BrdcstStr,
2324 ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
2326 [], itins.rm, d>, EVEX_4V, EVEX_B, EVEX_KZ;
// 512-bit packed FP arithmetic instantiations. Each op comes in a PS
// flavor (v16f32, {1to16} broadcast, PS prefix) and a PD flavor
// (v8f64, {1to8} broadcast, PD + VEX_W). add/mul/min/max are marked
// commutable (trailing 1); sub/div are not (trailing 0).
2330 defm VADDPSZ : avx512_fp_packed<0x58, "addps", fadd, VK16WM, VR512, v16f32, f512mem,
2331 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2332 SSE_ALU_ITINS_P.s, 1>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2334 defm VADDPDZ : avx512_fp_packed<0x58, "addpd", fadd, VK8WM, VR512, v8f64, f512mem,
2335 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2336 SSE_ALU_ITINS_P.d, 1>,
2337 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2339 defm VMULPSZ : avx512_fp_packed<0x59, "mulps", fmul, VK16WM, VR512, v16f32, f512mem,
2340 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2341 SSE_ALU_ITINS_P.s, 1>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2342 defm VMULPDZ : avx512_fp_packed<0x59, "mulpd", fmul, VK8WM, VR512, v8f64, f512mem,
2343 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2344 SSE_ALU_ITINS_P.d, 1>,
2345 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2347 defm VMINPSZ : avx512_fp_packed<0x5D, "minps", X86fmin, VK16WM, VR512, v16f32, f512mem,
2348 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2349 SSE_ALU_ITINS_P.s, 1>,
2350 EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2351 defm VMAXPSZ : avx512_fp_packed<0x5F, "maxps", X86fmax, VK16WM, VR512, v16f32, f512mem,
2352 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2353 SSE_ALU_ITINS_P.s, 1>,
2354 EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2356 defm VMINPDZ : avx512_fp_packed<0x5D, "minpd", X86fmin, VK8WM, VR512, v8f64, f512mem,
2357 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2358 SSE_ALU_ITINS_P.d, 1>,
2359 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2360 defm VMAXPDZ : avx512_fp_packed<0x5F, "maxpd", X86fmax, VK8WM, VR512, v8f64, f512mem,
2361 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2362 SSE_ALU_ITINS_P.d, 1>,
2363 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2365 defm VSUBPSZ : avx512_fp_packed<0x5C, "subps", fsub, VK16WM, VR512, v16f32, f512mem,
2366 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2367 SSE_ALU_ITINS_P.s, 0>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2368 defm VDIVPSZ : avx512_fp_packed<0x5E, "divps", fdiv, VK16WM, VR512, v16f32, f512mem,
2369 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2370 SSE_ALU_ITINS_P.s, 0>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2372 defm VSUBPDZ : avx512_fp_packed<0x5C, "subpd", fsub, VK8WM, VR512, v8f64, f512mem,
2373 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2374 SSE_ALU_ITINS_P.d, 0>,
2375 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2376 defm VDIVPDZ : avx512_fp_packed<0x5E, "divpd", fdiv, VK8WM, VR512, v8f64, f512mem,
2377 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2378 SSE_ALU_ITINS_P.d, 0>,
2379 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
// Lower the masked max/min intrinsics to the plain (unmasked) register
// forms when the call uses an all-ones mask, a zero passthrough vector,
// and the current rounding mode (FROUND_CURRENT) - i.e. when no masking
// behavior is actually requested.
2381 def : Pat<(v16f32 (int_x86_avx512_mask_max_ps_512 (v16f32 VR512:$src1),
2382 (v16f32 VR512:$src2), (bc_v16f32 (v16i32 immAllZerosV)),
2383 (i16 -1), FROUND_CURRENT)),
2384 (VMAXPSZrr VR512:$src1, VR512:$src2)>;
2386 def : Pat<(v8f64 (int_x86_avx512_mask_max_pd_512 (v8f64 VR512:$src1),
2387 (v8f64 VR512:$src2), (bc_v8f64 (v16i32 immAllZerosV)),
2388 (i8 -1), FROUND_CURRENT)),
2389 (VMAXPDZrr VR512:$src1, VR512:$src2)>;
2391 def : Pat<(v16f32 (int_x86_avx512_mask_min_ps_512 (v16f32 VR512:$src1),
2392 (v16f32 VR512:$src2), (bc_v16f32 (v16i32 immAllZerosV)),
2393 (i16 -1), FROUND_CURRENT)),
2394 (VMINPSZrr VR512:$src1, VR512:$src2)>;
2396 def : Pat<(v8f64 (int_x86_avx512_mask_min_pd_512 (v8f64 VR512:$src1),
2397 (v8f64 VR512:$src2), (bc_v8f64 (v16i32 immAllZerosV)),
2398 (i8 -1), FROUND_CURRENT)),
2399 (VMINPDZrr VR512:$src1, VR512:$src2)>;
2400 //===----------------------------------------------------------------------===//
2401 // AVX-512 VPTESTM instructions
2402 //===----------------------------------------------------------------------===//
// VPTESTM-style test: produces a mask register (KRC) from comparing two
// vectors via OpNode (X86testm/X86testnm at the instantiation sites).
// rr takes two registers; rm loads the second operand and bitconverts
// it to the element type expected by memop_frag.
2404 multiclass avx512_vptest<bits<8> opc, string OpcodeStr, RegisterClass KRC,
2405 RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
2406 SDNode OpNode, ValueType vt> {
2407 def rr : AVX512PI<opc, MRMSrcReg,
2408 (outs KRC:$dst), (ins RC:$src1, RC:$src2),
2409 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2410 [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2)))],
2411 SSEPackedInt>, EVEX_4V;
2412 def rm : AVX512PI<opc, MRMSrcMem,
2413 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2),
2414 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2415 [(set KRC:$dst, (OpNode (vt RC:$src1),
2416 (bitconvert (memop_frag addr:$src2))))], SSEPackedInt>, EVEX_4V;
// VPTESTM (base AVX-512) and VPTESTNM (CDI-gated) in dword/qword
// flavors; results land in VK16/VK8 mask registers. The intrinsic
// patterns below handle the all-ones-mask calls by copying the mask
// result into a GPR class.
// NOTE(review): both integer tests use f512mem as the memory operand -
// looks inconsistent with the integer memop fragments; verify against
// the rest of the file before changing.
2419 defm VPTESTMDZ : avx512_vptest<0x27, "vptestmd", VK16, VR512, f512mem,
2420 memopv16i32, X86testm, v16i32>, T8PD, EVEX_V512,
2421 EVEX_CD8<32, CD8VF>;
2422 defm VPTESTMQZ : avx512_vptest<0x27, "vptestmq", VK8, VR512, f512mem,
2423 memopv8i64, X86testm, v8i64>, T8PD, EVEX_V512, VEX_W,
2424 EVEX_CD8<64, CD8VF>;
2426 let Predicates = [HasCDI] in {
2427 defm VPTESTNMDZ : avx512_vptest<0x27, "vptestnmd", VK16, VR512, f512mem,
2428 memopv16i32, X86testnm, v16i32>, T8XS, EVEX_V512,
2429 EVEX_CD8<32, CD8VF>;
2430 defm VPTESTNMQZ : avx512_vptest<0x27, "vptestnmq", VK8, VR512, f512mem,
2431 memopv8i64, X86testnm, v8i64>, T8XS, EVEX_V512, VEX_W,
2432 EVEX_CD8<64, CD8VF>;
2435 def : Pat <(i16 (int_x86_avx512_mask_ptestm_d_512 (v16i32 VR512:$src1),
2436 (v16i32 VR512:$src2), (i16 -1))),
2437 (COPY_TO_REGCLASS (VPTESTMDZrr VR512:$src1, VR512:$src2), GR16)>;
2439 def : Pat <(i8 (int_x86_avx512_mask_ptestm_q_512 (v8i64 VR512:$src1),
2440 (v8i64 VR512:$src2), (i8 -1))),
2441 (COPY_TO_REGCLASS (VPTESTMQZrr VR512:$src1, VR512:$src2), GR8)>;
2442 //===----------------------------------------------------------------------===//
2443 // AVX-512 Shift instructions
2444 //===----------------------------------------------------------------------===//
// Shift-by-immediate: register (ri) and memory (mi) forms with ISel
// patterns, plus merge-masked variants (rik/mik, EVEX_K) that are
// assembler-only ([]). ImmFormR/ImmFormM carry the MRM group encoding
// (e.g. MRM2r/MRM2m for psrl).
2445 multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
2446 string OpcodeStr, SDNode OpNode, RegisterClass RC,
2447 ValueType vt, X86MemOperand x86memop, PatFrag mem_frag,
2448 RegisterClass KRC> {
2449 def ri : AVX512BIi8<opc, ImmFormR, (outs RC:$dst),
2450 (ins RC:$src1, i8imm:$src2),
2451 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2452 [(set RC:$dst, (vt (OpNode RC:$src1, (i8 imm:$src2))))],
2453 SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V;
2454 def rik : AVX512BIi8<opc, ImmFormR, (outs RC:$dst),
2455 (ins KRC:$mask, RC:$src1, i8imm:$src2),
2456 !strconcat(OpcodeStr,
2457 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2458 [], SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V, EVEX_K;
2459 def mi: AVX512BIi8<opc, ImmFormM, (outs RC:$dst),
2460 (ins x86memop:$src1, i8imm:$src2),
2461 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2462 [(set RC:$dst, (OpNode (mem_frag addr:$src1),
2463 (i8 imm:$src2)))], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V;
2464 def mik: AVX512BIi8<opc, ImmFormM, (outs RC:$dst),
2465 (ins KRC:$mask, x86memop:$src1, i8imm:$src2),
2466 !strconcat(OpcodeStr,
2467 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2468 [], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V, EVEX_K;
// Shift-by-vector-count: the count operand ($src2) is always a 128-bit
// XMM register or a 128-bit memory load (i128mem, bitconverted from
// memopv2i64 via bc_frag). Masked forms (rrk/rmk) are assembler-only.
2471 multiclass avx512_shift_rrm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2472 RegisterClass RC, ValueType vt, ValueType SrcVT,
2473 PatFrag bc_frag, RegisterClass KRC> {
2474 // src2 is always 128-bit
2475 def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2476 (ins RC:$src1, VR128X:$src2),
2477 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2478 [(set RC:$dst, (vt (OpNode RC:$src1, (SrcVT VR128X:$src2))))],
2479 SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V;
2480 def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2481 (ins KRC:$mask, RC:$src1, VR128X:$src2),
2482 !strconcat(OpcodeStr,
2483 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2484 [], SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V, EVEX_K;
2485 def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2486 (ins RC:$src1, i128mem:$src2),
2487 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2488 [(set RC:$dst, (vt (OpNode RC:$src1,
2489 (bc_frag (memopv2i64 addr:$src2)))))],
2490 SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V;
2491 def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2492 (ins KRC:$mask, RC:$src1, i128mem:$src2),
2493 !strconcat(OpcodeStr,
2494 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2495 [], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V, EVEX_K;
// Shift instantiations. Each mnemonic gets two defms under the same
// name: the immediate form (avx512_shift_rmi, full-vector CD8VF
// scaling) and the vector-count form (avx512_shift_rrm, CD8VQ scaling
// because the memory operand is only 128 bits of the 512-bit vector).
// The generated record names differ (ri/mi vs. rr/rm), so the shared
// defm prefix does not collide.
2498 defm VPSRLDZ : avx512_shift_rmi<0x72, MRM2r, MRM2m, "vpsrld", X86vsrli,
2499 VR512, v16i32, i512mem, memopv16i32, VK16WM>,
2500 EVEX_V512, EVEX_CD8<32, CD8VF>;
2501 defm VPSRLDZ : avx512_shift_rrm<0xD2, "vpsrld", X86vsrl,
2502 VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
2503 EVEX_CD8<32, CD8VQ>;
2505 defm VPSRLQZ : avx512_shift_rmi<0x73, MRM2r, MRM2m, "vpsrlq", X86vsrli,
2506 VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
2507 EVEX_CD8<64, CD8VF>, VEX_W;
2508 defm VPSRLQZ : avx512_shift_rrm<0xD3, "vpsrlq", X86vsrl,
2509 VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
2510 EVEX_CD8<64, CD8VQ>, VEX_W;
2512 defm VPSLLDZ : avx512_shift_rmi<0x72, MRM6r, MRM6m, "vpslld", X86vshli,
2513 VR512, v16i32, i512mem, memopv16i32, VK16WM>, EVEX_V512,
2514 EVEX_CD8<32, CD8VF>;
2515 defm VPSLLDZ : avx512_shift_rrm<0xF2, "vpslld", X86vshl,
2516 VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
2517 EVEX_CD8<32, CD8VQ>;
2519 defm VPSLLQZ : avx512_shift_rmi<0x73, MRM6r, MRM6m, "vpsllq", X86vshli,
2520 VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
2521 EVEX_CD8<64, CD8VF>, VEX_W;
2522 defm VPSLLQZ : avx512_shift_rrm<0xF3, "vpsllq", X86vshl,
2523 VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
2524 EVEX_CD8<64, CD8VQ>, VEX_W;
2526 defm VPSRADZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsrad", X86vsrai,
2527 VR512, v16i32, i512mem, memopv16i32, VK16WM>,
2528 EVEX_V512, EVEX_CD8<32, CD8VF>;
2529 defm VPSRADZ : avx512_shift_rrm<0xE2, "vpsrad", X86vsra,
2530 VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
2531 EVEX_CD8<32, CD8VQ>;
// vpsraq is new in AVX-512 (no SSE/AVX counterpart); both of its forms
// carry VEX_W for the 64-bit element width.
2533 defm VPSRAQZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsraq", X86vsrai,
2534 VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
2535 EVEX_CD8<64, CD8VF>, VEX_W;
2536 defm VPSRAQZ : avx512_shift_rrm<0xE2, "vpsraq", X86vsra,
2537 VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
2538 EVEX_CD8<64, CD8VQ>, VEX_W;
2540 //===-------------------------------------------------------------------===//
2541 // Variable Bit Shifts
2542 //===-------------------------------------------------------------------===//
// Per-element variable shifts (vpsllv/vpsrlv/vpsrav): the shift count
// comes from a second full-width vector, mapped directly onto the
// generic shl/srl/sra nodes at the instantiation sites.
// NOTE(review): the [(set ...)] opening lines and trailing EVEX
// modifiers of both defs are not visible in this chunk.
2543 multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
2544 RegisterClass RC, ValueType vt,
2545 X86MemOperand x86memop, PatFrag mem_frag> {
2546 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
2547 (ins RC:$src1, RC:$src2),
2548 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2550 (vt (OpNode RC:$src1, (vt RC:$src2))))]>,
2552 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
2553 (ins RC:$src1, x86memop:$src2),
2554 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2556 (vt (OpNode RC:$src1, (mem_frag addr:$src2))))]>,
// Variable-shift instantiations: shl/srl/sra, each in dword (v16i32)
// and qword (v8i64, VEX_W) flavors, full-vector memory operands.
2560 defm VPSLLVDZ : avx512_var_shift<0x47, "vpsllvd", shl, VR512, v16i32,
2561 i512mem, memopv16i32>, EVEX_V512,
2562 EVEX_CD8<32, CD8VF>;
2563 defm VPSLLVQZ : avx512_var_shift<0x47, "vpsllvq", shl, VR512, v8i64,
2564 i512mem, memopv8i64>, EVEX_V512, VEX_W,
2565 EVEX_CD8<64, CD8VF>;
2566 defm VPSRLVDZ : avx512_var_shift<0x45, "vpsrlvd", srl, VR512, v16i32,
2567 i512mem, memopv16i32>, EVEX_V512,
2568 EVEX_CD8<32, CD8VF>;
2569 defm VPSRLVQZ : avx512_var_shift<0x45, "vpsrlvq", srl, VR512, v8i64,
2570 i512mem, memopv8i64>, EVEX_V512, VEX_W,
2571 EVEX_CD8<64, CD8VF>;
2572 defm VPSRAVDZ : avx512_var_shift<0x46, "vpsravd", sra, VR512, v16i32,
2573 i512mem, memopv16i32>, EVEX_V512,
2574 EVEX_CD8<32, CD8VF>;
2575 defm VPSRAVQZ : avx512_var_shift<0x46, "vpsravq", sra, VR512, v8i64,
2576 i512mem, memopv8i64>, EVEX_V512, VEX_W,
2577 EVEX_CD8<64, CD8VF>;
2579 //===----------------------------------------------------------------------===//
2580 // AVX-512 - MOVDDUP
2581 //===----------------------------------------------------------------------===//
// MOVDDUP (duplicate even-indexed doubles): register and memory forms
// mapped to the X86Movddup node. The extra Pat folds a scalar f64 load
// that is broadcast via scalar_to_vector into the memory form.
2583 multiclass avx512_movddup<string OpcodeStr, RegisterClass RC, ValueType VT,
2584 X86MemOperand x86memop, PatFrag memop_frag> {
2585 def rr : AVX512PDI<0x12, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
2586 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
2587 [(set RC:$dst, (VT (X86Movddup RC:$src)))]>, EVEX;
2588 def rm : AVX512PDI<0x12, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
2589 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
2591 (VT (X86Movddup (memop_frag addr:$src))))]>, EVEX;
2594 defm VMOVDDUPZ : avx512_movddup<"vmovddup", VR512, v8f64, f512mem, memopv8f64>,
2595 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
2596 def : Pat<(X86Movddup (v8f64 (scalar_to_vector (loadf64 addr:$src)))),
2597 (VMOVDDUPZrm addr:$src)>;
2599 //===---------------------------------------------------------------------===//
2600 // Replicate Single FP - MOVSHDUP and MOVSLDUP
2601 //===---------------------------------------------------------------------===//
// MOVSHDUP/MOVSLDUP (replicate odd/even single-FP elements): generic
// unary multiclass plus v16f32 instantiations; the trailing Pats reuse
// the FP instructions for the integer-typed (v16i32) forms of the same
// shuffle nodes.
2602 multiclass avx512_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
2603 ValueType vt, RegisterClass RC, PatFrag mem_frag,
2604 X86MemOperand x86memop> {
2605 def rr : AVX512XSI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
2606 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
2607 [(set RC:$dst, (vt (OpNode RC:$src)))]>, EVEX;
2609 def rm : AVX512XSI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
2610 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
2611 [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>, EVEX;
2614 defm VMOVSHDUPZ : avx512_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
2615 v16f32, VR512, memopv16f32, f512mem>, EVEX_V512,
2616 EVEX_CD8<32, CD8VF>;
2617 defm VMOVSLDUPZ : avx512_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
2618 v16f32, VR512, memopv16f32, f512mem>, EVEX_V512,
2619 EVEX_CD8<32, CD8VF>;
2621 def : Pat<(v16i32 (X86Movshdup VR512:$src)), (VMOVSHDUPZrr VR512:$src)>;
2622 def : Pat<(v16i32 (X86Movshdup (memopv16i32 addr:$src))),
2623 (VMOVSHDUPZrm addr:$src)>;
2624 def : Pat<(v16i32 (X86Movsldup VR512:$src)), (VMOVSLDUPZrr VR512:$src)>;
2625 def : Pat<(v16i32 (X86Movsldup (memopv16i32 addr:$src))),
2626 (VMOVSLDUPZrm addr:$src)>;
2628 //===----------------------------------------------------------------------===//
2629 // Move Low to High and High to Low packed FP Instructions
2630 //===----------------------------------------------------------------------===//
// MOVLHPS/MOVHLPS on the AVX-512 XMM classes (VR128X), with patterns
// for the v4f32 forms; the HasAVX512-gated Pats below reuse them for
// the integer-typed (v4i32/v2i64) variants of the same shuffle nodes.
2631 def VMOVLHPSZrr : AVX512PSI<0x16, MRMSrcReg, (outs VR128X:$dst),
2632 (ins VR128X:$src1, VR128X:$src2),
2633 "vmovlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2634 [(set VR128X:$dst, (v4f32 (X86Movlhps VR128X:$src1, VR128X:$src2)))],
2635 IIC_SSE_MOV_LH>, EVEX_4V;
2636 def VMOVHLPSZrr : AVX512PSI<0x12, MRMSrcReg, (outs VR128X:$dst),
2637 (ins VR128X:$src1, VR128X:$src2),
2638 "vmovhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2639 [(set VR128X:$dst, (v4f32 (X86Movhlps VR128X:$src1, VR128X:$src2)))],
2640 IIC_SSE_MOV_LH>, EVEX_4V;
2642 let Predicates = [HasAVX512] in {
2644 def : Pat<(v4i32 (X86Movlhps VR128X:$src1, VR128X:$src2)),
2645 (VMOVLHPSZrr VR128X:$src1, VR128X:$src2)>;
2646 def : Pat<(v2i64 (X86Movlhps VR128X:$src1, VR128X:$src2)),
2647 (VMOVLHPSZrr (v2i64 VR128X:$src1), VR128X:$src2)>;
2650 def : Pat<(v4i32 (X86Movhlps VR128X:$src1, VR128X:$src2)),
2651 (VMOVHLPSZrr VR128X:$src1, VR128X:$src2)>;
2654 //===----------------------------------------------------------------------===//
2655 // FMA - Fused Multiply Operations
// Packed FMA3, 213-form operand order. $src1 is tied to $dst (FMA3
// destructive encoding). Emits register (r), memory (m), and
// embedded-broadcast (mb, EVEX_B) forms; the broadcast form replicates
// a scalar load via X86VBroadcast before the multiply-add.
2657 let Constraints = "$src1 = $dst" in {
2658 multiclass avx512_fma3p_rm<bits<8> opc, string OpcodeStr,
2659 RegisterClass RC, X86MemOperand x86memop,
2660 PatFrag mem_frag, X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
2661 string BrdcstStr, SDNode OpNode, ValueType OpVT> {
2662 def r: AVX512FMA3<opc, MRMSrcReg, (outs RC:$dst),
2663 (ins RC:$src1, RC:$src2, RC:$src3),
2664 !strconcat(OpcodeStr," \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
2665 [(set RC:$dst, (OpVT(OpNode RC:$src1, RC:$src2, RC:$src3)))]>;
2668 def m: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
2669 (ins RC:$src1, RC:$src2, x86memop:$src3),
2670 !strconcat(OpcodeStr, " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
2671 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2,
2672 (mem_frag addr:$src3))))]>;
2673 def mb: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
2674 (ins RC:$src1, RC:$src2, x86scalar_mop:$src3),
2675 !strconcat(OpcodeStr, " \t{${src3}", BrdcstStr,
2676 ", $src2, $dst|$dst, $src2, ${src3}", BrdcstStr, "}"),
2677 [(set RC:$dst, (OpNode RC:$src1, RC:$src2,
2678 (OpVT (X86VBroadcast (scalar_mfrag addr:$src3)))))]>, EVEX_B;
2680 } // Constraints = "$src1 = $dst"
// 213-form packed FMA instantiations: fmadd/fmsub/fmaddsub/fmsubadd/
// fnmadd/fnmsub, PS flavor (v16f32, {1to16}) then PD flavor (v8f64,
// {1to8}, VEX_W). ExeDomain keeps each group in its FP domain.
2682 let ExeDomain = SSEPackedSingle in {
2683 defm VFMADD213PSZ : avx512_fma3p_rm<0xA8, "vfmadd213ps", VR512, f512mem,
2684 memopv16f32, f32mem, loadf32, "{1to16}",
2685 X86Fmadd, v16f32>, EVEX_V512,
2686 EVEX_CD8<32, CD8VF>;
2687 defm VFMSUB213PSZ : avx512_fma3p_rm<0xAA, "vfmsub213ps", VR512, f512mem,
2688 memopv16f32, f32mem, loadf32, "{1to16}",
2689 X86Fmsub, v16f32>, EVEX_V512,
2690 EVEX_CD8<32, CD8VF>;
2691 defm VFMADDSUB213PSZ : avx512_fma3p_rm<0xA6, "vfmaddsub213ps", VR512, f512mem,
2692 memopv16f32, f32mem, loadf32, "{1to16}",
2693 X86Fmaddsub, v16f32>,
2694 EVEX_V512, EVEX_CD8<32, CD8VF>;
2695 defm VFMSUBADD213PSZ : avx512_fma3p_rm<0xA7, "vfmsubadd213ps", VR512, f512mem,
2696 memopv16f32, f32mem, loadf32, "{1to16}",
2697 X86Fmsubadd, v16f32>,
2698 EVEX_V512, EVEX_CD8<32, CD8VF>;
2699 defm VFNMADD213PSZ : avx512_fma3p_rm<0xAC, "vfnmadd213ps", VR512, f512mem,
2700 memopv16f32, f32mem, loadf32, "{1to16}",
2701 X86Fnmadd, v16f32>, EVEX_V512,
2702 EVEX_CD8<32, CD8VF>;
2703 defm VFNMSUB213PSZ : avx512_fma3p_rm<0xAE, "vfnmsub213ps", VR512, f512mem,
2704 memopv16f32, f32mem, loadf32, "{1to16}",
2705 X86Fnmsub, v16f32>, EVEX_V512,
2706 EVEX_CD8<32, CD8VF>;
2708 let ExeDomain = SSEPackedDouble in {
2709 defm VFMADD213PDZ : avx512_fma3p_rm<0xA8, "vfmadd213pd", VR512, f512mem,
2710 memopv8f64, f64mem, loadf64, "{1to8}",
2711 X86Fmadd, v8f64>, EVEX_V512,
2712 VEX_W, EVEX_CD8<64, CD8VF>;
2713 defm VFMSUB213PDZ : avx512_fma3p_rm<0xAA, "vfmsub213pd", VR512, f512mem,
2714 memopv8f64, f64mem, loadf64, "{1to8}",
2715 X86Fmsub, v8f64>, EVEX_V512, VEX_W,
2716 EVEX_CD8<64, CD8VF>;
2717 defm VFMADDSUB213PDZ : avx512_fma3p_rm<0xA6, "vfmaddsub213pd", VR512, f512mem,
2718 memopv8f64, f64mem, loadf64, "{1to8}",
2719 X86Fmaddsub, v8f64>, EVEX_V512, VEX_W,
2720 EVEX_CD8<64, CD8VF>;
2721 defm VFMSUBADD213PDZ : avx512_fma3p_rm<0xA7, "vfmsubadd213pd", VR512, f512mem,
2722 memopv8f64, f64mem, loadf64, "{1to8}",
2723 X86Fmsubadd, v8f64>, EVEX_V512, VEX_W,
2724 EVEX_CD8<64, CD8VF>;
2725 defm VFNMADD213PDZ : avx512_fma3p_rm<0xAC, "vfnmadd213pd", VR512, f512mem,
2726 memopv8f64, f64mem, loadf64, "{1to8}",
2727 X86Fnmadd, v8f64>, EVEX_V512, VEX_W,
2728 EVEX_CD8<64, CD8VF>;
2729 defm VFNMSUB213PDZ : avx512_fma3p_rm<0xAE, "vfnmsub213pd", VR512, f512mem,
2730 memopv8f64, f64mem, loadf64, "{1to8}",
2731 X86Fnmsub, v8f64>, EVEX_V512, VEX_W,
2732 EVEX_CD8<64, CD8VF>;
// Packed FMA3, 132-form: memory-only variants where the loaded operand
// is the SECOND multiplicand ($src2 from memory, $src3 in a register).
// Emits a plain memory form (m) and an embedded-broadcast form (mb);
// $src1 is tied to $dst as in the 213 multiclass above.
2735 let Constraints = "$src1 = $dst" in {
2736 multiclass avx512_fma3p_m132<bits<8> opc, string OpcodeStr,
2737 RegisterClass RC, X86MemOperand x86memop,
2738 PatFrag mem_frag, X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
2739 string BrdcstStr, SDNode OpNode, ValueType OpVT> {
2741 def m: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
2742 (ins RC:$src1, RC:$src3, x86memop:$src2),
2743 !strconcat(OpcodeStr, " \t{$src2, $src3, $dst|$dst, $src3, $src2}"),
2744 [(set RC:$dst, (OpVT (OpNode RC:$src1, (mem_frag addr:$src2), RC:$src3)))]>;
2745 def mb: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
2746 (ins RC:$src1, RC:$src3, x86scalar_mop:$src2),
2747 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2748 ", $src3, $dst|$dst, $src3, ${src2}", BrdcstStr, "}"),
2749 [(set RC:$dst, (OpNode RC:$src1,
2750 (OpVT (X86VBroadcast (scalar_mfrag addr:$src2))), RC:$src3))]>, EVEX_B;
2752 } // Constraints = "$src1 = $dst"
// 132-form packed FMA instantiations, mirroring the 213-form set above
// (same six operations, PS then PD flavors, 0x96-0x9E opcode range).
2755 let ExeDomain = SSEPackedSingle in {
2756 defm VFMADD132PSZ : avx512_fma3p_m132<0x98, "vfmadd132ps", VR512, f512mem,
2757 memopv16f32, f32mem, loadf32, "{1to16}",
2758 X86Fmadd, v16f32>, EVEX_V512,
2759 EVEX_CD8<32, CD8VF>;
2760 defm VFMSUB132PSZ : avx512_fma3p_m132<0x9A, "vfmsub132ps", VR512, f512mem,
2761 memopv16f32, f32mem, loadf32, "{1to16}",
2762 X86Fmsub, v16f32>, EVEX_V512,
2763 EVEX_CD8<32, CD8VF>;
2764 defm VFMADDSUB132PSZ : avx512_fma3p_m132<0x96, "vfmaddsub132ps", VR512, f512mem,
2765 memopv16f32, f32mem, loadf32, "{1to16}",
2766 X86Fmaddsub, v16f32>,
2767 EVEX_V512, EVEX_CD8<32, CD8VF>;
2768 defm VFMSUBADD132PSZ : avx512_fma3p_m132<0x97, "vfmsubadd132ps", VR512, f512mem,
2769 memopv16f32, f32mem, loadf32, "{1to16}",
2770 X86Fmsubadd, v16f32>,
2771 EVEX_V512, EVEX_CD8<32, CD8VF>;
2772 defm VFNMADD132PSZ : avx512_fma3p_m132<0x9C, "vfnmadd132ps", VR512, f512mem,
2773 memopv16f32, f32mem, loadf32, "{1to16}",
2774 X86Fnmadd, v16f32>, EVEX_V512,
2775 EVEX_CD8<32, CD8VF>;
2776 defm VFNMSUB132PSZ : avx512_fma3p_m132<0x9E, "vfnmsub132ps", VR512, f512mem,
2777 memopv16f32, f32mem, loadf32, "{1to16}",
2778 X86Fnmsub, v16f32>, EVEX_V512,
2779 EVEX_CD8<32, CD8VF>;
2781 let ExeDomain = SSEPackedDouble in {
2782 defm VFMADD132PDZ : avx512_fma3p_m132<0x98, "vfmadd132pd", VR512, f512mem,
2783 memopv8f64, f64mem, loadf64, "{1to8}",
2784 X86Fmadd, v8f64>, EVEX_V512,
2785 VEX_W, EVEX_CD8<64, CD8VF>;
2786 defm VFMSUB132PDZ : avx512_fma3p_m132<0x9A, "vfmsub132pd", VR512, f512mem,
2787 memopv8f64, f64mem, loadf64, "{1to8}",
2788 X86Fmsub, v8f64>, EVEX_V512, VEX_W,
2789 EVEX_CD8<64, CD8VF>;
2790 defm VFMADDSUB132PDZ : avx512_fma3p_m132<0x96, "vfmaddsub132pd", VR512, f512mem,
2791 memopv8f64, f64mem, loadf64, "{1to8}",
2792 X86Fmaddsub, v8f64>, EVEX_V512, VEX_W,
2793 EVEX_CD8<64, CD8VF>;
2794 defm VFMSUBADD132PDZ : avx512_fma3p_m132<0x97, "vfmsubadd132pd", VR512, f512mem,
2795 memopv8f64, f64mem, loadf64, "{1to8}",
2796 X86Fmsubadd, v8f64>, EVEX_V512, VEX_W,
2797 EVEX_CD8<64, CD8VF>;
2798 defm VFNMADD132PDZ : avx512_fma3p_m132<0x9C, "vfnmadd132pd", VR512, f512mem,
2799 memopv8f64, f64mem, loadf64, "{1to8}",
2800 X86Fnmadd, v8f64>, EVEX_V512, VEX_W,
2801 EVEX_CD8<64, CD8VF>;
2802 defm VFNMSUB132PDZ : avx512_fma3p_m132<0x9E, "vfnmsub132pd", VR512, f512mem,
2803 memopv8f64, f64mem, loadf64, "{1to8}",
2804 X86Fnmsub, v8f64>, EVEX_V512, VEX_W,
2805 EVEX_CD8<64, CD8VF>;
// Scalar FMA3 (213-form): register form is commutable in the first two
// multiplicands (note the pattern swaps $src1/$src2 relative to the
// instruction order); the memory form loads the addend from f128mem.
// $src1 is tied to $dst.
2809 let Constraints = "$src1 = $dst" in {
2810 multiclass avx512_fma3s_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2811 RegisterClass RC, ValueType OpVT,
2812 X86MemOperand x86memop, Operand memop,
2814 let isCommutable = 1 in
2815 def r : AVX512FMA3<opc, MRMSrcReg, (outs RC:$dst),
2816 (ins RC:$src1, RC:$src2, RC:$src3),
2817 !strconcat(OpcodeStr,
2818 " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
2820 (OpVT (OpNode RC:$src2, RC:$src1, RC:$src3)))]>;
2822 def m : AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
2823 (ins RC:$src1, RC:$src2, f128mem:$src3),
2824 !strconcat(OpcodeStr,
2825 " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
2827 (OpVT (OpNode RC:$src2, RC:$src1,
2828 (mem_frag addr:$src3))))]>;
2831 } // Constraints = "$src1 = $dst"
// Scalar FMA instantiations: ss (f32/FR32X, tuple-1 CD8<32>) and
// sd (f64/FR64X, VEX_W, tuple-1 CD8<64>) for fmadd/fmsub/fnmadd/fnmsub.
2833 defm VFMADDSSZ : avx512_fma3s_rm<0xA9, "vfmadd213ss", X86Fmadd, FR32X,
2834 f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
2835 defm VFMADDSDZ : avx512_fma3s_rm<0xA9, "vfmadd213sd", X86Fmadd, FR64X,
2836 f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
2837 defm VFMSUBSSZ : avx512_fma3s_rm<0xAB, "vfmsub213ss", X86Fmsub, FR32X,
2838 f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
2839 defm VFMSUBSDZ : avx512_fma3s_rm<0xAB, "vfmsub213sd", X86Fmsub, FR64X,
2840 f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
2841 defm VFNMADDSSZ : avx512_fma3s_rm<0xAD, "vfnmadd213ss", X86Fnmadd, FR32X,
2842 f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
2843 defm VFNMADDSDZ : avx512_fma3s_rm<0xAD, "vfnmadd213sd", X86Fnmadd, FR64X,
2844 f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
2845 defm VFNMSUBSSZ : avx512_fma3s_rm<0xAF, "vfnmsub213ss", X86Fnmsub, FR32X,
2846 f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
2847 defm VFNMSUBSDZ : avx512_fma3s_rm<0xAF, "vfnmsub213sd", X86Fnmsub, FR64X,
2848 f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
2850 //===----------------------------------------------------------------------===//
2851 // AVX-512 Scalar convert from sign integer to float/double
2852 //===----------------------------------------------------------------------===//
// GPR/memory -> scalar-FP conversion skeleton (cvtsi2ss/sd and the
// unsigned cvtusi2 variants). Both forms are pattern-less here
// (patterns are supplied as separate Pat defs below); hasSideEffects=0
// keeps the pattern-less defs from being treated as having unmodeled
// side effects.
2854 multiclass avx512_vcvtsi<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
2855 X86MemOperand x86memop, string asm> {
2856 let hasSideEffects = 0 in {
2857 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
2858 !strconcat(asm," \t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
2861 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
2862 (ins DstRC:$src1, x86memop:$src),
2863 !strconcat(asm," \t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
2865 } // hasSideEffects = 0
// Signed (0x2A) and unsigned (0x7B) int-to-FP conversions from GR32/
// GR64 and from memory, with Pat defs mapping sint_to_fp/uint_to_fp.
// The first (tied) operand is fed an IMPLICIT_DEF since only the low
// scalar element is written.
2867 let Predicates = [HasAVX512] in {
2868 defm VCVTSI2SSZ : avx512_vcvtsi<0x2A, GR32, FR32X, i32mem, "cvtsi2ss{l}">,
2869 XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2870 defm VCVTSI642SSZ : avx512_vcvtsi<0x2A, GR64, FR32X, i64mem, "cvtsi2ss{q}">,
2871 XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
2872 defm VCVTSI2SDZ : avx512_vcvtsi<0x2A, GR32, FR64X, i32mem, "cvtsi2sd{l}">,
2873 XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2874 defm VCVTSI642SDZ : avx512_vcvtsi<0x2A, GR64, FR64X, i64mem, "cvtsi2sd{q}">,
2875 XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
2877 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
2878 (VCVTSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
2879 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
2880 (VCVTSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
2881 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
2882 (VCVTSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
2883 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
2884 (VCVTSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
2886 def : Pat<(f32 (sint_to_fp GR32:$src)),
2887 (VCVTSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
2888 def : Pat<(f32 (sint_to_fp GR64:$src)),
2889 (VCVTSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
2890 def : Pat<(f64 (sint_to_fp GR32:$src)),
2891 (VCVTSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
2892 def : Pat<(f64 (sint_to_fp GR64:$src)),
2893 (VCVTSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
// Unsigned conversions are new EVEX-only encodings (opcode 0x7B).
2895 defm VCVTUSI2SSZ : avx512_vcvtsi<0x7B, GR32, FR32X, i32mem, "cvtusi2ss{l}">,
2896 XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2897 defm VCVTUSI642SSZ : avx512_vcvtsi<0x7B, GR64, FR32X, i64mem, "cvtusi2ss{q}">,
2898 XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
2899 defm VCVTUSI2SDZ : avx512_vcvtsi<0x7B, GR32, FR64X, i32mem, "cvtusi2sd{l}">,
2900 XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2901 defm VCVTUSI642SDZ : avx512_vcvtsi<0x7B, GR64, FR64X, i64mem, "cvtusi2sd{q}">,
2902 XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
2904 def : Pat<(f32 (uint_to_fp (loadi32 addr:$src))),
2905 (VCVTUSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
2906 def : Pat<(f32 (uint_to_fp (loadi64 addr:$src))),
2907 (VCVTUSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
2908 def : Pat<(f64 (uint_to_fp (loadi32 addr:$src))),
2909 (VCVTUSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
2910 def : Pat<(f64 (uint_to_fp (loadi64 addr:$src))),
2911 (VCVTUSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
2913 def : Pat<(f32 (uint_to_fp GR32:$src)),
2914 (VCVTUSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
2915 def : Pat<(f32 (uint_to_fp GR64:$src)),
2916 (VCVTUSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
2917 def : Pat<(f64 (uint_to_fp GR32:$src)),
2918 (VCVTUSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
2919 def : Pat<(f64 (uint_to_fp GR64:$src)),
2920 (VCVTUSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
2923 //===----------------------------------------------------------------------===//
2924 // AVX-512 Scalar convert from float/double to integer
2925 //===----------------------------------------------------------------------===//
// Scalar fp -> int conversion taking an intrinsic. The rr form carries the
// intrinsic pattern; the rm form is pattern-less (memory folding is handled
// via the intrinsic patterns elsewhere).
// NOTE(review): this excerpt is missing the multiclass's final parameter line
// (the `string asm` parameter, original line 2928) and its closing brace —
// confirm against the full file before editing.
2926 multiclass avx512_cvt_s_int<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
2927 Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
2929 let hasSideEffects = 0 in {
2930 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
2931 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
2932 [(set DstRC:$dst, (Int SrcRC:$src))]>, EVEX, VEX_LIG,
2933 Requires<[HasAVX512]>;
2935 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
2936 !strconcat(asm," \t{$src, $dst|$dst, $src}"), []>, EVEX, VEX_LIG,
2937 Requires<[HasAVX512]>;
2938 } // hasSideEffects = 0
// Scalar cvt(t)ss/sd -> (u)si instantiations: opcode 0x2D for signed,
// AVX-512-only 0x79 for unsigned; VEX_W selects the 64-bit GPR destination.
2940 let Predicates = [HasAVX512] in {
2941 // Convert float/double to signed/unsigned int 32/64
2942 defm VCVTSS2SIZ: avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse_cvtss2si,
2943 ssmem, sse_load_f32, "cvtss2si">,
2944 XS, EVEX_CD8<32, CD8VT1>;
2945 defm VCVTSS2SI64Z: avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse_cvtss2si64,
2946 ssmem, sse_load_f32, "cvtss2si">,
2947 XS, VEX_W, EVEX_CD8<32, CD8VT1>;
2948 defm VCVTSS2USIZ: avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtss2usi,
2949 ssmem, sse_load_f32, "cvtss2usi">,
2950 XS, EVEX_CD8<32, CD8VT1>;
2951 defm VCVTSS2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
2952 int_x86_avx512_cvtss2usi64, ssmem,
2953 sse_load_f32, "cvtss2usi">, XS, VEX_W,
2954 EVEX_CD8<32, CD8VT1>;
2955 defm VCVTSD2SIZ: avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse2_cvtsd2si,
2956 sdmem, sse_load_f64, "cvtsd2si">,
2957 XD, EVEX_CD8<64, CD8VT1>;
2958 defm VCVTSD2SI64Z: avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse2_cvtsd2si64,
2959 sdmem, sse_load_f64, "cvtsd2si">,
2960 XD, VEX_W, EVEX_CD8<64, CD8VT1>;
2961 defm VCVTSD2USIZ: avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtsd2usi,
2962 sdmem, sse_load_f64, "cvtsd2usi">,
2963 XD, EVEX_CD8<64, CD8VT1>;
2964 defm VCVTSD2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
2965 int_x86_avx512_cvtsd2usi64, sdmem,
2966 sse_load_f64, "cvtsd2usi">, XD, VEX_W,
2967 EVEX_CD8<64, CD8VT1>;
// Intrinsic-only (isCodeGenOnly) forms so the assembler never sees duplicate
// mnemonics. First: 3-address int -> fp conversions built on the shared SSE
// multiclass, with EVEX_4V encoding.
2969 let isCodeGenOnly = 1 in {
2970 defm Int_VCVTSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
2971 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
2972 SSE_CVT_Scalar, 0>, XS, EVEX_4V;
2973 defm Int_VCVTSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
2974 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
2975 SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
2976 defm Int_VCVTSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
2977 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
2978 SSE_CVT_Scalar, 0>, XD, EVEX_4V;
2979 defm Int_VCVTSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
2980 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
2981 SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
2983 defm Int_VCVTUSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
2984 int_x86_avx512_cvtusi2ss, i32mem, loadi32, "cvtusi2ss{l}",
2985 SSE_CVT_Scalar, 0>, XS, EVEX_4V;
2986 defm Int_VCVTUSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
2987 int_x86_avx512_cvtusi642ss, i64mem, loadi64, "cvtusi2ss{q}",
2988 SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
2989 defm Int_VCVTUSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
2990 int_x86_avx512_cvtusi2sd, i32mem, loadi32, "cvtusi2sd{l}",
2991 SSE_CVT_Scalar, 0>, XD, EVEX_4V;
2992 defm Int_VCVTUSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
2993 int_x86_avx512_cvtusi642sd, i64mem, loadi64, "cvtusi2sd{q}",
2994 SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
2995 } // isCodeGenOnly = 1
2997 // Convert float/double to signed/unsigned int 32/64 with truncation
// Truncating variants: opcode 0x2C for signed, 0x78 for unsigned (vs. 0x2D /
// 0x79 for the rounding forms above).
2998 let isCodeGenOnly = 1 in {
2999 defm Int_VCVTTSS2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse_cvttss2si,
3000 ssmem, sse_load_f32, "cvttss2si">,
3001 XS, EVEX_CD8<32, CD8VT1>;
3002 defm Int_VCVTTSS2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
3003 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
3004 "cvttss2si">, XS, VEX_W,
3005 EVEX_CD8<32, CD8VT1>;
3006 defm Int_VCVTTSD2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse2_cvttsd2si,
3007 sdmem, sse_load_f64, "cvttsd2si">, XD,
3008 EVEX_CD8<64, CD8VT1>;
3009 defm Int_VCVTTSD2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
3010 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
3011 "cvttsd2si">, XD, VEX_W,
3012 EVEX_CD8<64, CD8VT1>;
3013 defm Int_VCVTTSS2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
3014 int_x86_avx512_cvttss2usi, ssmem, sse_load_f32,
3015 "cvttss2usi">, XS, EVEX_CD8<32, CD8VT1>;
3016 defm Int_VCVTTSS2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
3017 int_x86_avx512_cvttss2usi64, ssmem,
3018 sse_load_f32, "cvttss2usi">, XS, VEX_W,
3019 EVEX_CD8<32, CD8VT1>;
3020 defm Int_VCVTTSD2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
3021 int_x86_avx512_cvttsd2usi,
3022 sdmem, sse_load_f64, "cvttsd2usi">, XD,
3023 EVEX_CD8<64, CD8VT1>;
3024 defm Int_VCVTTSD2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
3025 int_x86_avx512_cvttsd2usi64, sdmem,
3026 sse_load_f64, "cvttsd2usi">, XD, VEX_W,
3027 EVEX_CD8<64, CD8VT1>;
3028 } // isCodeGenOnly = 1
// SDNode-based scalar fp -> int conversion (selected from fp_to_sint /
// fp_to_uint on plain FR32X/FR64X values rather than via intrinsics).
// NOTE(review): this excerpt is missing the multiclass's final parameter line
// (original line 3032, presumably `string asm`) and its closing brace.
3030 multiclass avx512_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
3031 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
3033 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
3034 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3035 [(set DstRC:$dst, (OpNode SrcRC:$src))]>, EVEX;
3036 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
3037 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3038 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>, EVEX;
// Truncating scalar conversions selected for plain fp_to_sint/fp_to_uint.
3041 defm VCVTTSS2SIZ : avx512_cvt_s<0x2C, FR32X, GR32, fp_to_sint, f32mem,
3042 loadf32, "cvttss2si">, XS,
3043 EVEX_CD8<32, CD8VT1>;
3044 defm VCVTTSS2USIZ : avx512_cvt_s<0x78, FR32X, GR32, fp_to_uint, f32mem,
3045 loadf32, "cvttss2usi">, XS,
3046 EVEX_CD8<32, CD8VT1>;
3047 defm VCVTTSS2SI64Z : avx512_cvt_s<0x2C, FR32X, GR64, fp_to_sint, f32mem,
3048 loadf32, "cvttss2si">, XS, VEX_W,
3049 EVEX_CD8<32, CD8VT1>;
3050 defm VCVTTSS2USI64Z : avx512_cvt_s<0x78, FR32X, GR64, fp_to_uint, f32mem,
3051 loadf32, "cvttss2usi">, XS, VEX_W,
3052 EVEX_CD8<32, CD8VT1>;
3053 defm VCVTTSD2SIZ : avx512_cvt_s<0x2C, FR64X, GR32, fp_to_sint, f64mem,
3054 loadf64, "cvttsd2si">, XD,
3055 EVEX_CD8<64, CD8VT1>;
3056 defm VCVTTSD2USIZ : avx512_cvt_s<0x78, FR64X, GR32, fp_to_uint, f64mem,
3057 loadf64, "cvttsd2usi">, XD,
3058 EVEX_CD8<64, CD8VT1>;
3059 defm VCVTTSD2SI64Z : avx512_cvt_s<0x2C, FR64X, GR64, fp_to_sint, f64mem,
3060 loadf64, "cvttsd2si">, XD, VEX_W,
3061 EVEX_CD8<64, CD8VT1>;
3062 defm VCVTTSD2USI64Z : avx512_cvt_s<0x78, FR64X, GR64, fp_to_uint, f64mem,
3063 loadf64, "cvttsd2usi">, XD, VEX_W,
3064 EVEX_CD8<64, CD8VT1>;
3066 //===----------------------------------------------------------------------===//
3067 // AVX-512 Convert form float to double and back
3068 //===----------------------------------------------------------------------===//
// Scalar ss <-> sd conversions (opcode 0x5A). Instruction defs are
// pattern-less; selection happens through the Pat<>s below.
3069 let hasSideEffects = 0 in {
3070 def VCVTSS2SDZrr : AVX512XSI<0x5A, MRMSrcReg, (outs FR64X:$dst),
3071 (ins FR32X:$src1, FR32X:$src2),
3072 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3073 []>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2F]>;
3075 def VCVTSS2SDZrm : AVX512XSI<0x5A, MRMSrcMem, (outs FR64X:$dst),
3076 (ins FR32X:$src1, f32mem:$src2),
3077 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3078 []>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>,
3079 EVEX_CD8<32, CD8VT1>;
3081 // Convert scalar double to scalar single
3082 def VCVTSD2SSZrr : AVX512XDI<0x5A, MRMSrcReg, (outs FR32X:$dst),
3083 (ins FR64X:$src1, FR64X:$src2),
3084 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3085 []>, EVEX_4V, VEX_LIG, VEX_W, Sched<[WriteCvtF2F]>;
3087 def VCVTSD2SSZrm : AVX512XDI<0x5A, MRMSrcMem, (outs FR32X:$dst),
3088 (ins FR64X:$src1, f64mem:$src2),
3089 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3090 []>, EVEX_4V, VEX_LIG, VEX_W,
3091 Sched<[WriteCvtF2FLd, ReadAfterLd]>, EVEX_CD8<64, CD8VT1>;
// fextend: reuse the source register for both operands in the rr form.
// extloadf32 folds the load when optimizing for size, otherwise uses a
// separate VMOVSSZrm load to avoid a partial-register dependency.
3094 def : Pat<(f64 (fextend FR32X:$src)), (VCVTSS2SDZrr FR32X:$src, FR32X:$src)>,
3095 Requires<[HasAVX512]>;
3096 def : Pat<(fextend (loadf32 addr:$src)),
3097 (VCVTSS2SDZrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX512]>;
3099 def : Pat<(extloadf32 addr:$src),
3100 (VCVTSS2SDZrm (f32 (IMPLICIT_DEF)), addr:$src)>,
3101 Requires<[HasAVX512, OptForSize]>;
3103 def : Pat<(extloadf32 addr:$src),
3104 (VCVTSS2SDZrr (f32 (IMPLICIT_DEF)), (VMOVSSZrm addr:$src))>,
3105 Requires<[HasAVX512, OptForSpeed]>;
3107 def : Pat<(f32 (fround FR64X:$src)), (VCVTSD2SSZrr FR64X:$src, FR64X:$src)>,
3108 Requires<[HasAVX512]>;
// Packed fp/int conversion multiclasses. The _with_rc variant adds an rrb
// form taking an explicit AVX512RC rounding-control operand (EVEX.B + RC).
// NOTE(review): several interior lines of these multiclasses (the `Domain d`
// parameter line and parts of the [(set ...)] patterns, plus closing braces)
// are missing from this excerpt — consult the full file.
3110 multiclass avx512_vcvt_fp_with_rc<bits<8> opc, string asm, RegisterClass SrcRC,
3111 RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
3112 X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
3114 let hasSideEffects = 0 in {
3115 def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
3116 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3118 (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
3119 def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
3120 !strconcat(asm," \t{$rc, $src, $dst|$dst, $src, $rc}"),
3121 [], d>, EVEX, EVEX_B, EVEX_RC;
3123 def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
3124 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3126 (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
3127 } // hasSideEffects = 0
// Same as above but without the rounding-control (rrb) form.
3130 multiclass avx512_vcvt_fp<bits<8> opc, string asm, RegisterClass SrcRC,
3131 RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
3132 X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
3134 let hasSideEffects = 0 in {
3135 def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
3136 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3138 (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
3140 def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
3141 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3143 (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
3144 } // hasSideEffects = 0
// 512-bit packed double <-> single conversions.
3147 defm VCVTPD2PSZ : avx512_vcvt_fp_with_rc<0x5A, "vcvtpd2ps", VR512, VR256X, fround,
3148 memopv8f64, f512mem, v8f32, v8f64,
3149 SSEPackedSingle>, EVEX_V512, VEX_W, PD,
3150 EVEX_CD8<64, CD8VF>;
3152 defm VCVTPS2PDZ : avx512_vcvt_fp<0x5A, "vcvtps2pd", VR256X, VR512, fextend,
3153 memopv4f64, f256mem, v8f64, v8f32,
3154 SSEPackedDouble>, EVEX_V512, PS,
3155 EVEX_CD8<32, CD8VH>;
3156 def : Pat<(v8f64 (extloadv8f32 addr:$src)),
3157 (VCVTPS2PDZrm addr:$src)>;
// Map the unmasked (all-ones mask, current rounding) intrinsic forms onto
// the plain rr / rounding-control rrb instructions.
3159 def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
3160 (bc_v8f32(v8i32 immAllZerosV)), (i8 -1), (i32 FROUND_CURRENT))),
3161 (VCVTPD2PSZrr VR512:$src)>;
3163 def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
3164 (bc_v8f32(v8i32 immAllZerosV)), (i8 -1), imm:$rc)),
3165 (VCVTPD2PSZrrb VR512:$src, imm:$rc)>;
3167 //===----------------------------------------------------------------------===//
3168 // AVX-512 Vector convert from sign integer to float/double
3169 //===----------------------------------------------------------------------===//
3171 defm VCVTDQ2PSZ : avx512_vcvt_fp_with_rc<0x5B, "vcvtdq2ps", VR512, VR512, sint_to_fp,
3172 memopv8i64, i512mem, v16f32, v16i32,
3173 SSEPackedSingle>, EVEX_V512, PS,
3174 EVEX_CD8<32, CD8VF>;
3176 defm VCVTDQ2PDZ : avx512_vcvt_fp<0xE6, "vcvtdq2pd", VR256X, VR512, sint_to_fp,
3177 memopv4i64, i256mem, v8f64, v8i32,
3178 SSEPackedDouble>, EVEX_V512, XS,
3179 EVEX_CD8<32, CD8VH>;
3181 defm VCVTTPS2DQZ : avx512_vcvt_fp<0x5B, "vcvttps2dq", VR512, VR512, fp_to_sint,
3182 memopv16f32, f512mem, v16i32, v16f32,
3183 SSEPackedSingle>, EVEX_V512, XS,
3184 EVEX_CD8<32, CD8VF>;
3186 defm VCVTTPD2DQZ : avx512_vcvt_fp<0xE6, "vcvttpd2dq", VR512, VR256X, fp_to_sint,
3187 memopv8f64, f512mem, v8i32, v8f64,
3188 SSEPackedDouble>, EVEX_V512, PD, VEX_W,
3189 EVEX_CD8<64, CD8VF>;
3191 defm VCVTTPS2UDQZ : avx512_vcvt_fp<0x78, "vcvttps2udq", VR512, VR512, fp_to_uint,
3192 memopv16f32, f512mem, v16i32, v16f32,
3193 SSEPackedSingle>, EVEX_V512, PS,
3194 EVEX_CD8<32, CD8VF>;
3196 // cvttps2udq (src, 0, mask-all-ones, sae-current)
3197 def : Pat<(v16i32 (int_x86_avx512_mask_cvttps2udq_512 (v16f32 VR512:$src),
3198 (v16i32 immAllZerosV), (i16 -1), FROUND_CURRENT)),
3199 (VCVTTPS2UDQZrr VR512:$src)>;
3201 defm VCVTTPD2UDQZ : avx512_vcvt_fp<0x78, "vcvttpd2udq", VR512, VR256X, fp_to_uint,
3202 memopv8f64, f512mem, v8i32, v8f64,
3203 SSEPackedDouble>, EVEX_V512, PS, VEX_W,
3204 EVEX_CD8<64, CD8VF>;
3206 // cvttpd2udq (src, 0, mask-all-ones, sae-current)
3207 def : Pat<(v8i32 (int_x86_avx512_mask_cvttpd2udq_512 (v8f64 VR512:$src),
3208 (v8i32 immAllZerosV), (i8 -1), FROUND_CURRENT)),
3209 (VCVTTPD2UDQZrr VR512:$src)>;
3211 defm VCVTUDQ2PDZ : avx512_vcvt_fp<0x7A, "vcvtudq2pd", VR256X, VR512, uint_to_fp,
3212 memopv4i64, f256mem, v8f64, v8i32,
3213 SSEPackedDouble>, EVEX_V512, XS,
3214 EVEX_CD8<32, CD8VH>;
3216 defm VCVTUDQ2PSZ : avx512_vcvt_fp_with_rc<0x7A, "vcvtudq2ps", VR512, VR512, uint_to_fp,
3217 memopv16i32, f512mem, v16f32, v16i32,
3218 SSEPackedSingle>, EVEX_V512, XD,
3219 EVEX_CD8<32, CD8VF>;
// Narrower (128/256-bit) unsigned conversions have no dedicated AVX-512F
// instruction: widen into a ZMM register, run the 512-bit form, and extract
// the low subregister. The upper lanes are undef but unused.
3221 def : Pat<(v8i32 (fp_to_uint (v8f32 VR256X:$src1))),
3222 (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
3223 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
3225 def : Pat<(v4i32 (fp_to_uint (v4f32 VR128X:$src1))),
3226 (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
3227 (v16f32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;
3229 def : Pat<(v8f32 (uint_to_fp (v8i32 VR256X:$src1))),
3230 (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
3231 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
3233 def : Pat<(v4f32 (uint_to_fp (v4i32 VR128X:$src1))),
3234 (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
3235 (v16i32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;
3237 def : Pat<(v4f64 (uint_to_fp (v4i32 VR128X:$src1))),
3238 (EXTRACT_SUBREG (v8f64 (VCVTUDQ2PDZrr
3239 (v8i32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_ymm)))), sub_ymm)>;
// Unmasked intrinsic forms mapped onto plain rr / rounding-control rrb.
3241 def : Pat<(v16f32 (int_x86_avx512_mask_cvtdq2ps_512 (v16i32 VR512:$src),
3242 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), imm:$rc)),
3243 (VCVTDQ2PSZrrb VR512:$src, imm:$rc)>;
3244 def : Pat<(v8f64 (int_x86_avx512_mask_cvtdq2pd_512 (v8i32 VR256X:$src),
3245 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
3246 (VCVTDQ2PDZrr VR256X:$src)>;
3247 def : Pat<(v16f32 (int_x86_avx512_mask_cvtudq2ps_512 (v16i32 VR512:$src),
3248 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), imm:$rc)),
3249 (VCVTUDQ2PSZrrb VR512:$src, imm:$rc)>;
3250 def : Pat<(v8f64 (int_x86_avx512_mask_cvtudq2pd_512 (v8i32 VR256X:$src),
3251 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
3252 (VCVTUDQ2PDZrr VR256X:$src)>;
// Packed fp -> int with current rounding mode: pattern-less rr/rm plus a
// rounding-control rrb form; selection is done via the intrinsic Pat<>s below.
// NOTE(review): the [(...)], d> tails of the rr and rm defs (original lines
// 3260, 3267) and the multiclass closing brace are missing from this excerpt.
3254 multiclass avx512_vcvt_fp2int<bits<8> opc, string asm, RegisterClass SrcRC,
3255 RegisterClass DstRC, PatFrag mem_frag,
3256 X86MemOperand x86memop, Domain d> {
3257 let hasSideEffects = 0 in {
3258 def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
3259 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3261 def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
3262 !strconcat(asm," \t{$rc, $src, $dst|$dst, $src, $rc}"),
3263 [], d>, EVEX, EVEX_B, EVEX_RC;
3265 def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
3266 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3268 } // hasSideEffects = 0
// Rounding (non-truncating) packed fp -> signed/unsigned int instantiations.
3271 defm VCVTPS2DQZ : avx512_vcvt_fp2int<0x5B, "vcvtps2dq", VR512, VR512,
3272 memopv16f32, f512mem, SSEPackedSingle>, PD,
3273 EVEX_V512, EVEX_CD8<32, CD8VF>;
3274 defm VCVTPD2DQZ : avx512_vcvt_fp2int<0xE6, "vcvtpd2dq", VR512, VR256X,
3275 memopv8f64, f512mem, SSEPackedDouble>, XD, VEX_W,
3276 EVEX_V512, EVEX_CD8<64, CD8VF>;
3278 def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2dq_512 (v16f32 VR512:$src),
3279 (v16i32 immAllZerosV), (i16 -1), imm:$rc)),
3280 (VCVTPS2DQZrrb VR512:$src, imm:$rc)>;
3282 def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2dq_512 (v8f64 VR512:$src),
3283 (v8i32 immAllZerosV), (i8 -1), imm:$rc)),
3284 (VCVTPD2DQZrrb VR512:$src, imm:$rc)>;
3286 defm VCVTPS2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtps2udq", VR512, VR512,
3287 memopv16f32, f512mem, SSEPackedSingle>,
3288 PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
3289 defm VCVTPD2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtpd2udq", VR512, VR256X,
3290 memopv8f64, f512mem, SSEPackedDouble>, VEX_W,
3291 PS, EVEX_V512, EVEX_CD8<64, CD8VF>;
3293 def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2udq_512 (v16f32 VR512:$src),
3294 (v16i32 immAllZerosV), (i16 -1), imm:$rc)),
3295 (VCVTPS2UDQZrrb VR512:$src, imm:$rc)>;
3297 def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2udq_512 (v8f64 VR512:$src),
3298 (v8i32 immAllZerosV), (i8 -1), imm:$rc)),
3299 (VCVTPD2UDQZrrb VR512:$src, imm:$rc)>;
// Load-folding for the unaligned-load (loadv*) and extending-load cases.
3301 let Predicates = [HasAVX512] in {
3302 def : Pat<(v8f32 (fround (loadv8f64 addr:$src))),
3303 (VCVTPD2PSZrm addr:$src)>;
3304 def : Pat<(v8f64 (extloadv8f32 addr:$src)),
3305 (VCVTPS2PDZrm addr:$src)>;
3308 //===----------------------------------------------------------------------===//
3309 // Half precision conversion instructions
3310 //===----------------------------------------------------------------------===//
// f16 -> f32 widening (opcode 0x13). Pattern-less; selected via the intrinsic
// Pat<>s below. NOTE(review): the rr def's pattern tail (original line 3315)
// and the multiclass closing braces are missing from this excerpt.
3311 multiclass avx512_cvtph2ps<RegisterClass destRC, RegisterClass srcRC,
3312 X86MemOperand x86memop> {
3313 def rr : AVX5128I<0x13, MRMSrcReg, (outs destRC:$dst), (ins srcRC:$src),
3314 "vcvtph2ps\t{$src, $dst|$dst, $src}",
3316 let hasSideEffects = 0, mayLoad = 1 in
3317 def rm : AVX5128I<0x13, MRMSrcMem, (outs destRC:$dst), (ins x86memop:$src),
3318 "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, EVEX;
// f32 -> f16 narrowing (opcode 0x1D) with an immediate rounding-control
// operand; the mr form stores directly to memory.
3321 multiclass avx512_cvtps2ph<RegisterClass destRC, RegisterClass srcRC,
3322 X86MemOperand x86memop> {
3323 def rr : AVX512AIi8<0x1D, MRMDestReg, (outs destRC:$dst),
3324 (ins srcRC:$src1, i32i8imm:$src2),
3325 "vcvtps2ph \t{$src2, $src1, $dst|$dst, $src1, $src2}",
3327 let hasSideEffects = 0, mayStore = 1 in
3328 def mr : AVX512AIi8<0x1D, MRMDestMem, (outs),
3329 (ins x86memop:$dst, srcRC:$src1, i32i8imm:$src2),
3330 "vcvtps2ph \t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, EVEX;
3333 defm VCVTPH2PSZ : avx512_cvtph2ps<VR512, VR256X, f256mem>, EVEX_V512,
3334 EVEX_CD8<32, CD8VH>;
3335 defm VCVTPS2PHZ : avx512_cvtps2ph<VR256X, VR512, f256mem>, EVEX_V512,
3336 EVEX_CD8<32, CD8VH>;
// Unmasked intrinsic forms mapped onto the plain rr instructions.
3338 def : Pat<(v16i16 (int_x86_avx512_mask_vcvtps2ph_512 (v16f32 VR512:$src),
3339 imm:$rc, (bc_v16i16(v8i32 immAllZerosV)), (i16 -1))),
3340 (VCVTPS2PHZrr VR512:$src, imm:$rc)>;
3342 def : Pat<(v16f32 (int_x86_avx512_mask_vcvtph2ps_512 (v16i16 VR256X:$src),
3343 (bc_v16f32(v16i32 immAllZerosV)), (i16 -1), (i32 FROUND_CURRENT))),
3344 (VCVTPH2PSZrr VR256X:$src)>;
// Scalar ordered/unordered compares writing EFLAGS (ucomiss/comiss and the
// sd forms). The vector-typed assembler variants are pattern-less
// (Pattern = []); isCodeGenOnly intrinsic forms carry the X86(u)comi patterns.
3346 let Defs = [EFLAGS], Predicates = [HasAVX512] in {
3347 defm VUCOMISSZ : sse12_ord_cmp<0x2E, FR32X, X86cmp, f32, f32mem, loadf32,
3348 "ucomiss">, PS, EVEX, VEX_LIG,
3349 EVEX_CD8<32, CD8VT1>;
3350 defm VUCOMISDZ : sse12_ord_cmp<0x2E, FR64X, X86cmp, f64, f64mem, loadf64,
3351 "ucomisd">, PD, EVEX,
3352 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
3353 let Pattern = []<dag> in {
3354 defm VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, undef, v4f32, f128mem, load,
3355 "comiss">, PS, EVEX, VEX_LIG,
3356 EVEX_CD8<32, CD8VT1>;
3357 defm VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, undef, v2f64, f128mem, load,
3358 "comisd">, PD, EVEX,
3359 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
3361 let isCodeGenOnly = 1 in {
3362 defm Int_VUCOMISSZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v4f32, f128mem,
3363 load, "ucomiss">, PS, EVEX, VEX_LIG,
3364 EVEX_CD8<32, CD8VT1>;
3365 defm Int_VUCOMISDZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v2f64, f128mem,
3366 load, "ucomisd">, PD, EVEX,
3367 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
3369 defm Int_VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v4f32, f128mem,
3370 load, "comiss">, PS, EVEX, VEX_LIG,
3371 EVEX_CD8<32, CD8VT1>;
3372 defm Int_VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v2f64, f128mem,
3373 load, "comisd">, PD, EVEX,
3374 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
3378 /// avx512_fp14_s rcp14ss, rcp14sd, rsqrt14ss, rsqrt14sd
// Scalar 14-bit-precision reciprocal / reciprocal-sqrt approximations.
// Instruction defs are pattern-less; intrinsics are matched by the Pat<>s
// below via COPY_TO_REGCLASS between VR128X and the scalar classes.
3379 multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
3380 X86MemOperand x86memop> {
3381 let hasSideEffects = 0 in {
3382 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
3383 (ins RC:$src1, RC:$src2),
3384 !strconcat(OpcodeStr,
3385 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
3386 let mayLoad = 1 in {
3387 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
3388 (ins RC:$src1, x86memop:$src2),
3389 !strconcat(OpcodeStr,
3390 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
3395 defm VRCP14SS : avx512_fp14_s<0x4D, "vrcp14ss", FR32X, f32mem>,
3396 EVEX_CD8<32, CD8VT1>;
3397 defm VRCP14SD : avx512_fp14_s<0x4D, "vrcp14sd", FR64X, f64mem>,
3398 VEX_W, EVEX_CD8<64, CD8VT1>;
3399 defm VRSQRT14SS : avx512_fp14_s<0x4F, "vrsqrt14ss", FR32X, f32mem>,
3400 EVEX_CD8<32, CD8VT1>;
3401 defm VRSQRT14SD : avx512_fp14_s<0x4F, "vrsqrt14sd", FR64X, f64mem>,
3402 VEX_W, EVEX_CD8<64, CD8VT1>;
// Unmasked intrinsic forms (zero passthru, all-ones mask).
3404 def : Pat <(v4f32 (int_x86_avx512_rcp14_ss (v4f32 VR128X:$src1),
3405 (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1))),
3406 (COPY_TO_REGCLASS (VRCP14SSrr (COPY_TO_REGCLASS VR128X:$src1, FR32X),
3407 (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
3409 def : Pat <(v2f64 (int_x86_avx512_rcp14_sd (v2f64 VR128X:$src1),
3410 (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1))),
3411 (COPY_TO_REGCLASS (VRCP14SDrr (COPY_TO_REGCLASS VR128X:$src1, FR64X),
3412 (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
3414 def : Pat <(v4f32 (int_x86_avx512_rsqrt14_ss (v4f32 VR128X:$src1),
3415 (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1))),
3416 (COPY_TO_REGCLASS (VRSQRT14SSrr (COPY_TO_REGCLASS VR128X:$src1, FR32X),
3417 (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
3419 def : Pat <(v2f64 (int_x86_avx512_rsqrt14_sd (v2f64 VR128X:$src1),
3420 (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1))),
3421 (COPY_TO_REGCLASS (VRSQRT14SDrr (COPY_TO_REGCLASS VR128X:$src1, FR64X),
3422 (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
3424 /// avx512_fp14_p rcp14ps, rcp14pd, rsqrt14ps, rsqrt14pd
// Packed 14-bit-precision approximations, selected directly from the
// X86frsqrt / X86frcp SDNodes.
3425 multiclass avx512_fp14_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
3426 RegisterClass RC, X86MemOperand x86memop,
3427 PatFrag mem_frag, ValueType OpVt> {
3428 def r : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
3429 !strconcat(OpcodeStr,
3430 " \t{$src, $dst|$dst, $src}"),
3431 [(set RC:$dst, (OpVt (OpNode RC:$src)))]>,
3433 def m : AVX5128I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
3434 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
3435 [(set RC:$dst, (OpVt (OpNode (mem_frag addr:$src))))]>,
3438 defm VRSQRT14PSZ : avx512_fp14_p<0x4E, "vrsqrt14ps", X86frsqrt, VR512, f512mem,
3439 memopv16f32, v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
3440 defm VRSQRT14PDZ : avx512_fp14_p<0x4E, "vrsqrt14pd", X86frsqrt, VR512, f512mem,
3441 memopv8f64, v8f64>, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
3442 defm VRCP14PSZ : avx512_fp14_p<0x4C, "vrcp14ps", X86frcp, VR512, f512mem,
3443 memopv16f32, v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
3444 defm VRCP14PDZ : avx512_fp14_p<0x4C, "vrcp14pd", X86frcp, VR512, f512mem,
3445 memopv8f64, v8f64>, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
// Unmasked 512-bit intrinsic forms mapped onto the plain r instructions.
3447 def : Pat <(v16f32 (int_x86_avx512_rsqrt14_ps_512 (v16f32 VR512:$src),
3448 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
3449 (VRSQRT14PSZr VR512:$src)>;
3450 def : Pat <(v8f64 (int_x86_avx512_rsqrt14_pd_512 (v8f64 VR512:$src),
3451 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
3452 (VRSQRT14PDZr VR512:$src)>;
3454 def : Pat <(v16f32 (int_x86_avx512_rcp14_ps_512 (v16f32 VR512:$src),
3455 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
3456 (VRCP14PSZr VR512:$src)>;
3457 def : Pat <(v8f64 (int_x86_avx512_rcp14_pd_512 (v8f64 VR512:$src),
3458 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
3459 (VRCP14PDZr VR512:$src)>;
3461 /// avx512_fp28_s rcp28ss, rcp28sd, rsqrt28ss, rsqrt28sd
// Scalar 28-bit-precision approximations (ERI feature set). The rrb form is
// the {sae} (suppress-all-exceptions) variant, encoded with EVEX.B.
3462 multiclass avx512_fp28_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
3463 X86MemOperand x86memop> {
3464 let hasSideEffects = 0, Predicates = [HasERI] in {
3465 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
3466 (ins RC:$src1, RC:$src2),
3467 !strconcat(OpcodeStr,
3468 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
3469 def rrb : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
3470 (ins RC:$src1, RC:$src2),
3471 !strconcat(OpcodeStr,
3472 " \t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
3473 []>, EVEX_4V, EVEX_B;
3474 let mayLoad = 1 in {
3475 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
3476 (ins RC:$src1, x86memop:$src2),
3477 !strconcat(OpcodeStr,
3478 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
3483 defm VRCP28SS : avx512_fp28_s<0xCB, "vrcp28ss", FR32X, f32mem>,
3484 EVEX_CD8<32, CD8VT1>;
3485 defm VRCP28SD : avx512_fp28_s<0xCB, "vrcp28sd", FR64X, f64mem>,
3486 VEX_W, EVEX_CD8<64, CD8VT1>;
3487 defm VRSQRT28SS : avx512_fp28_s<0xCD, "vrsqrt28ss", FR32X, f32mem>,
3488 EVEX_CD8<32, CD8VT1>;
3489 defm VRSQRT28SD : avx512_fp28_s<0xCD, "vrsqrt28sd", FR64X, f64mem>,
3490 VEX_W, EVEX_CD8<64, CD8VT1>;
// Intrinsic forms; the rounding argument line of each pattern (e.g. original
// line 3494) is missing from this excerpt, but each maps to the {sae} rrb
// instruction.
3492 def : Pat <(v4f32 (int_x86_avx512_rcp28_ss (v4f32 VR128X:$src1),
3493 (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1),
3495 (COPY_TO_REGCLASS (VRCP28SSrrb (COPY_TO_REGCLASS VR128X:$src1, FR32X),
3496 (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
3498 def : Pat <(v2f64 (int_x86_avx512_rcp28_sd (v2f64 VR128X:$src1),
3499 (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1),
3501 (COPY_TO_REGCLASS (VRCP28SDrrb (COPY_TO_REGCLASS VR128X:$src1, FR64X),
3502 (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
3504 def : Pat <(v4f32 (int_x86_avx512_rsqrt28_ss (v4f32 VR128X:$src1),
3505 (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1),
3507 (COPY_TO_REGCLASS (VRSQRT28SSrrb (COPY_TO_REGCLASS VR128X:$src1, FR32X),
3508 (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
3510 def : Pat <(v2f64 (int_x86_avx512_rsqrt28_sd (v2f64 VR128X:$src1),
3511 (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1),
3513 (COPY_TO_REGCLASS (VRSQRT28SDrrb (COPY_TO_REGCLASS VR128X:$src1, FR64X),
3514 (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
3516 /// avx512_fp28_p rcp28ps, rcp28pd, rsqrt28ps, rsqrt28pd
// Packed 28-bit-precision approximations (ERI). rb is the {sae} form.
// NOTE(review): the pattern/trait tails of the r, rb and m defs (original
// lines 3523, 3527, 3530-3531) are missing from this excerpt.
3517 multiclass avx512_fp28_p<bits<8> opc, string OpcodeStr,
3518 RegisterClass RC, X86MemOperand x86memop> {
3519 let hasSideEffects = 0, Predicates = [HasERI] in {
3520 def r : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
3521 !strconcat(OpcodeStr,
3522 " \t{$src, $dst|$dst, $src}"),
3524 def rb : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
3525 !strconcat(OpcodeStr,
3526 " \t{{sae}, $src, $dst|$dst, $src, {sae}}"),
3528 def m : AVX5128I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
3529 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
3533 defm VRSQRT28PSZ : avx512_fp28_p<0xCC, "vrsqrt28ps", VR512, f512mem>,
3534 EVEX_V512, EVEX_CD8<32, CD8VF>;
3535 defm VRSQRT28PDZ : avx512_fp28_p<0xCC, "vrsqrt28pd", VR512, f512mem>,
3536 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
3537 defm VRCP28PSZ : avx512_fp28_p<0xCA, "vrcp28ps", VR512, f512mem>,
3538 EVEX_V512, EVEX_CD8<32, CD8VF>;
3539 defm VRCP28PDZ : avx512_fp28_p<0xCA, "vrcp28pd", VR512, f512mem>,
3540 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
// FROUND_NO_EXC intrinsic calls select the {sae} (rb) instruction forms.
3542 def : Pat <(v16f32 (int_x86_avx512_rsqrt28_ps (v16f32 VR512:$src),
3543 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), FROUND_NO_EXC)),
3544 (VRSQRT28PSZrb VR512:$src)>;
3545 def : Pat <(v8f64 (int_x86_avx512_rsqrt28_pd (v8f64 VR512:$src),
3546 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1), FROUND_NO_EXC)),
3547 (VRSQRT28PDZrb VR512:$src)>;
3549 def : Pat <(v16f32 (int_x86_avx512_rcp28_ps (v16f32 VR512:$src),
3550 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), FROUND_NO_EXC)),
3551 (VRCP28PSZrb VR512:$src)>;
3552 def : Pat <(v8f64 (int_x86_avx512_rcp28_pd (v8f64 VR512:$src),
3553 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1), FROUND_NO_EXC)),
3554 (VRCP28PDZrb VR512:$src)>;
// 512-bit packed sqrt: SDNode-selected PS/PD forms plus isCodeGenOnly
// intrinsic (_Int) forms.
// NOTE(review): the EVEX trait tails of PSZrr/PDZrr/PSZr_Int (original lines
// 3562-3564, 3574-3576, 3588) are missing from this excerpt. Also note
// PDZrm's pattern bitconverts a memopv16f32 (not memopv8f64) to v8f64 —
// presumably intentional bitcast-folding, but worth confirming against the
// full file's conventions.
3556 multiclass avx512_sqrt_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
3557 Intrinsic V16F32Int, Intrinsic V8F64Int,
3558 OpndItins itins_s, OpndItins itins_d> {
3559 def PSZrr :AVX512PSI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
3560 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3561 [(set VR512:$dst, (v16f32 (OpNode VR512:$src)))], itins_s.rr>,
3565 def PSZrm : AVX512PSI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
3566 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3568 (OpNode (v16f32 (bitconvert (memopv16f32 addr:$src)))))],
3569 itins_s.rm>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
3571 def PDZrr : AVX512PDI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
3572 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3573 [(set VR512:$dst, (v8f64 (OpNode VR512:$src)))], itins_d.rr>,
3577 def PDZrm : AVX512PDI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
3578 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3579 [(set VR512:$dst, (OpNode
3580 (v8f64 (bitconvert (memopv16f32 addr:$src)))))],
3581 itins_d.rm>, EVEX, EVEX_V512, EVEX_CD8<64, CD8VF>;
3583 let isCodeGenOnly = 1 in {
3584 def PSZr_Int : AVX512PSI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
3585 !strconcat(OpcodeStr,
3586 "ps\t{$src, $dst|$dst, $src}"),
3587 [(set VR512:$dst, (V16F32Int VR512:$src))]>,
3589 def PSZm_Int : AVX512PSI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
3590 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3592 (V16F32Int (memopv16f32 addr:$src)))]>, EVEX,
3593 EVEX_V512, EVEX_CD8<32, CD8VF>;
3594 def PDZr_Int : AVX512PDI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
3595 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3596 [(set VR512:$dst, (V8F64Int VR512:$src))]>,
3597 EVEX, EVEX_V512, VEX_W;
3598 def PDZm_Int : AVX512PDI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
3599 !strconcat(OpcodeStr,
3600 "pd\t{$src, $dst|$dst, $src}"),
3601 [(set VR512:$dst, (V8F64Int (memopv8f64 addr:$src)))]>,
3602 EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
3603 } // isCodeGenOnly = 1
// Scalar sqrt: pattern-less FR32X/FR64X forms (selected via the fsqrt Pat<>s
// after the defm) plus isCodeGenOnly VR128X intrinsic (_Int) forms.
3606 multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
3607 Intrinsic F32Int, Intrinsic F64Int,
3608 OpndItins itins_s, OpndItins itins_d> {
3609 def SSZr : SI<opc, MRMSrcReg, (outs FR32X:$dst),
3610 (ins FR32X:$src1, FR32X:$src2),
3611 !strconcat(OpcodeStr,
3612 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3613 [], itins_s.rr>, XS, EVEX_4V;
3614 let isCodeGenOnly = 1 in
3615 def SSZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
3616 (ins VR128X:$src1, VR128X:$src2),
3617 !strconcat(OpcodeStr,
3618 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3620 (F32Int VR128X:$src1, VR128X:$src2))],
3621 itins_s.rr>, XS, EVEX_4V;
3622 let mayLoad = 1 in {
3623 def SSZm : SI<opc, MRMSrcMem, (outs FR32X:$dst),
3624 (ins FR32X:$src1, f32mem:$src2),
3625 !strconcat(OpcodeStr,
3626 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3627 [], itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
3628 let isCodeGenOnly = 1 in
3629 def SSZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
3630 (ins VR128X:$src1, ssmem:$src2),
3631 !strconcat(OpcodeStr,
3632 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3634 (F32Int VR128X:$src1, sse_load_f32:$src2))],
3635 itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
// Double-precision variants (XD prefix, VEX_W).
3637 def SDZr : SI<opc, MRMSrcReg, (outs FR64X:$dst),
3638 (ins FR64X:$src1, FR64X:$src2),
3639 !strconcat(OpcodeStr,
3640 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
3642 let isCodeGenOnly = 1 in
3643 def SDZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
3644 (ins VR128X:$src1, VR128X:$src2),
3645 !strconcat(OpcodeStr,
3646 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3648 (F64Int VR128X:$src1, VR128X:$src2))],
3649 itins_s.rr>, XD, EVEX_4V, VEX_W;
3650 let mayLoad = 1 in {
3651 def SDZm : SI<opc, MRMSrcMem, (outs FR64X:$dst),
3652 (ins FR64X:$src1, f64mem:$src2),
3653 !strconcat(OpcodeStr,
3654 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
3655 XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
3656 let isCodeGenOnly = 1 in
3657 def SDZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
3658 (ins VR128X:$src1, sdmem:$src2),
3659 !strconcat(OpcodeStr,
3660 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3662 (F64Int VR128X:$src1, sse_load_f64:$src2))]>,
3663 XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
// Instantiate scalar + packed square root as VSQRT* (opcode 0x51).
3668 defm VSQRT : avx512_sqrt_scalar<0x51, "sqrt",
3669 int_x86_avx512_sqrt_ss, int_x86_avx512_sqrt_sd,
3670 SSE_SQRTSS, SSE_SQRTSD>,
3671 avx512_sqrt_packed<0x51, "vsqrt", fsqrt,
3672 int_x86_avx512_sqrt_ps_512, int_x86_avx512_sqrt_pd_512,
3673 SSE_SQRTPS, SSE_SQRTPD>;
3675 let Predicates = [HasAVX512] in {
// Generic fsqrt nodes select the scalar EVEX forms; the unused first source
// operand is satisfied with IMPLICIT_DEF. Memory folds are OptForSize only.
3676 def : Pat<(f32 (fsqrt FR32X:$src)),
3677 (VSQRTSSZr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
3678 def : Pat<(f32 (fsqrt (load addr:$src))),
3679 (VSQRTSSZm (f32 (IMPLICIT_DEF)), addr:$src)>,
3680 Requires<[OptForSize]>;
3681 def : Pat<(f64 (fsqrt FR64X:$src)),
3682 (VSQRTSDZr (f64 (IMPLICIT_DEF)), FR64X:$src)>;
3683 def : Pat<(f64 (fsqrt (load addr:$src))),
3684 (VSQRTSDZm (f64 (IMPLICIT_DEF)), addr:$src)>,
3685 Requires<[OptForSize]>;
// Reciprocal sqrt / reciprocal estimates map onto the 14-bit-accuracy forms.
3687 def : Pat<(f32 (X86frsqrt FR32X:$src)),
3688 (VRSQRT14SSrr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
3689 def : Pat<(f32 (X86frsqrt (load addr:$src))),
3690 (VRSQRT14SSrm (f32 (IMPLICIT_DEF)), addr:$src)>,
3691 Requires<[OptForSize]>;
3693 def : Pat<(f32 (X86frcp FR32X:$src)),
3694 (VRCP14SSrr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
3695 def : Pat<(f32 (X86frcp (load addr:$src))),
3696 (VRCP14SSrm (f32 (IMPLICIT_DEF)), addr:$src)>,
3697 Requires<[OptForSize]>;
// Legacy SSE sqrt intrinsics routed to the AVX-512 encodings via
// register-class copies (FR32/FR64 <-> VR128X).
3699 def : Pat<(int_x86_sse_sqrt_ss VR128X:$src),
3700 (COPY_TO_REGCLASS (VSQRTSSZr (f32 (IMPLICIT_DEF)),
3701 (COPY_TO_REGCLASS VR128X:$src, FR32)),
3703 def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
3704 (VSQRTSSZm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
3706 def : Pat<(int_x86_sse2_sqrt_sd VR128X:$src),
3707 (COPY_TO_REGCLASS (VSQRTSDZr (f64 (IMPLICIT_DEF)),
3708 (COPY_TO_REGCLASS VR128X:$src, FR64)),
3710 def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
3711 (VSQRTSDZm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
// Packed FP unary-op-with-immediate template (reg and mem forms) for both
// single (PS) and double (PD) execution domains; selected via the given
// vector intrinsics with an imm operand.
3715 multiclass avx512_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
3716 X86MemOperand x86memop, RegisterClass RC,
3717 PatFrag mem_frag32, PatFrag mem_frag64,
3718 Intrinsic V4F32Int, Intrinsic V2F64Int,
3720 let ExeDomain = SSEPackedSingle in {
3721 // Intrinsic operation, reg.
3722 // Vector intrinsic operation, reg
3723 def PSr : AVX512AIi8<opcps, MRMSrcReg,
3724 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
3725 !strconcat(OpcodeStr,
3726 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3727 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>;
3729 // Vector intrinsic operation, mem
3730 def PSm : AVX512AIi8<opcps, MRMSrcMem,
3731 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
3732 !strconcat(OpcodeStr,
3733 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3735 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
3736 EVEX_CD8<32, VForm>;
3737 } // ExeDomain = SSEPackedSingle
3739 let ExeDomain = SSEPackedDouble in {
3740 // Vector intrinsic operation, reg
3741 def PDr : AVX512AIi8<opcpd, MRMSrcReg,
3742 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
3743 !strconcat(OpcodeStr,
3744 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3745 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>;
3747 // Vector intrinsic operation, mem
3748 def PDm : AVX512AIi8<opcpd, MRMSrcMem,
3749 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
3750 !strconcat(OpcodeStr,
3751 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3753 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
3754 EVEX_CD8<64, VForm>;
3755 } // ExeDomain = SSEPackedDouble
// Scalar FP binary-op-with-immediate template: SS/SD register forms (no
// pattern), isCodeGenOnly intrinsic register forms, and intrinsic memory
// forms, all in GenericDomain.
3758 multiclass avx512_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
3762 let ExeDomain = GenericDomain in {
// Plain FR32X form carries no pattern; hasSideEffects = 0 keeps it hoistable.
3764 let hasSideEffects = 0 in
3765 def SSr : AVX512AIi8<opcss, MRMSrcReg,
3766 (outs FR32X:$dst), (ins FR32X:$src1, FR32X:$src2, i32i8imm:$src3),
3767 !strconcat(OpcodeStr,
3768 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3771 // Intrinsic operation, reg.
3772 let isCodeGenOnly = 1 in
3773 def SSr_Int : AVX512AIi8<opcss, MRMSrcReg,
3774 (outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
3775 !strconcat(OpcodeStr,
3776 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3777 [(set VR128X:$dst, (F32Int VR128X:$src1, VR128X:$src2, imm:$src3))]>;
3779 // Intrinsic operation, mem.
3780 def SSm : AVX512AIi8<opcss, MRMSrcMem, (outs VR128X:$dst),
3781 (ins VR128X:$src1, ssmem:$src2, i32i8imm:$src3),
3782 !strconcat(OpcodeStr,
3783 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3784 [(set VR128X:$dst, (F32Int VR128X:$src1,
3785 sse_load_f32:$src2, imm:$src3))]>,
3786 EVEX_CD8<32, CD8VT1>;
// f64 (SD) forms mirror the f32 ones above.
3789 let hasSideEffects = 0 in
3790 def SDr : AVX512AIi8<opcsd, MRMSrcReg,
3791 (outs FR64X:$dst), (ins FR64X:$src1, FR64X:$src2, i32i8imm:$src3),
3792 !strconcat(OpcodeStr,
3793 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3796 // Intrinsic operation, reg.
3797 let isCodeGenOnly = 1 in
3798 def SDr_Int : AVX512AIi8<opcsd, MRMSrcReg,
3799 (outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
3800 !strconcat(OpcodeStr,
3801 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3802 [(set VR128X:$dst, (F64Int VR128X:$src1, VR128X:$src2, imm:$src3))]>,
3805 // Intrinsic operation, mem.
3806 def SDm : AVX512AIi8<opcsd, MRMSrcMem,
3807 (outs VR128X:$dst), (ins VR128X:$src1, sdmem:$src2, i32i8imm:$src3),
3808 !strconcat(OpcodeStr,
3809 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3811 (F64Int VR128X:$src1, sse_load_f64:$src2, imm:$src3))]>,
3812 VEX_W, EVEX_CD8<64, CD8VT1>;
3813 } // ExeDomain = GenericDomain
// Packed round-to-scale template: reg and mem forms taking an i32i8 rounding
// immediate; pattern lists appear truncated in this chunk.
3816 multiclass avx512_rndscale<bits<8> opc, string OpcodeStr,
3817 X86MemOperand x86memop, RegisterClass RC,
3818 PatFrag mem_frag, Domain d> {
3819 let ExeDomain = d in {
3820 // Intrinsic operation, reg.
3821 // Vector intrinsic operation, reg
3822 def r : AVX512AIi8<opc, MRMSrcReg,
3823 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
3824 !strconcat(OpcodeStr,
3825 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3828 // Vector intrinsic operation, mem
3829 def m : AVX512AIi8<opc, MRMSrcMem,
3830 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
3831 !strconcat(OpcodeStr,
3832 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
// 512-bit packed rndscale instantiations plus the masked-intrinsic
// selection patterns (all-ones mask, pass-through = $src1 -> unmasked form).
3838 defm VRNDSCALEPSZ : avx512_rndscale<0x08, "vrndscaleps", f512mem, VR512,
3839 memopv16f32, SSEPackedSingle>, EVEX_V512,
3840 EVEX_CD8<32, CD8VF>;
3842 def : Pat<(v16f32 (int_x86_avx512_mask_rndscale_ps_512 (v16f32 VR512:$src1),
3843 imm:$src2, (v16f32 VR512:$src1), (i16 -1),
3845 (VRNDSCALEPSZr VR512:$src1, imm:$src2)>;
3848 defm VRNDSCALEPDZ : avx512_rndscale<0x09, "vrndscalepd", f512mem, VR512,
3849 memopv8f64, SSEPackedDouble>, EVEX_V512,
3850 VEX_W, EVEX_CD8<64, CD8VF>;
3852 def : Pat<(v8f64 (int_x86_avx512_mask_rndscale_pd_512 (v8f64 VR512:$src1),
3853 imm:$src2, (v8f64 VR512:$src1), (i8 -1),
3855 (VRNDSCALEPDZr VR512:$src1, imm:$src2)>;
// Scalar rndscale template.
// NOTE(review): the asm string below references only $src2/$src1 although the
// ins list also has the $src3 immediate — lines look truncated in this chunk;
// confirm against the original file before relying on the printed form.
3857 multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr,
3858 Operand x86memop, RegisterClass RC, Domain d> {
3859 let ExeDomain = d in {
3860 def r : AVX512AIi8<opc, MRMSrcReg,
3861 (outs RC:$dst), (ins RC:$src1, RC:$src2, i32i8imm:$src3),
3862 !strconcat(OpcodeStr,
3863 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3866 def m : AVX512AIi8<opc, MRMSrcMem,
3867 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i32i8imm:$src3),
3868 !strconcat(OpcodeStr,
3869 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3874 defm VRNDSCALESS : avx512_rndscale_scalar<0x0A, "vrndscaless", ssmem, FR32X,
3875 SSEPackedSingle>, EVEX_CD8<32, CD8VT1>;
3877 defm VRNDSCALESD : avx512_rndscale_scalar<0x0B, "vrndscalesd", sdmem, FR64X,
3878 SSEPackedDouble>, EVEX_CD8<64, CD8VT1>;
// Scalar rounding nodes selected onto VRNDSCALESS/SD with the matching
// RNDSCALE immediate: 0x1 = floor, 0x2 = ceil, 0x3 = trunc, 0x4 = rint
// (current rounding mode), 0xC = nearbyint (0x4 with precision exceptions
// suppressed). See the Intel SDM entry for VRNDSCALE* imm8 semantics.
// Consistency fix: the first pattern now carries the explicit (f32 ...)
// result cast like every sibling pattern; the inferred type is unchanged.
3880 def : Pat<(f32 (ffloor FR32X:$src)),
3881 (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x1))>;
3882 def : Pat<(f64 (ffloor FR64X:$src)),
3883 (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x1))>;
3884 def : Pat<(f32 (fnearbyint FR32X:$src)),
3885 (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0xC))>;
3886 def : Pat<(f64 (fnearbyint FR64X:$src)),
3887 (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0xC))>;
3888 def : Pat<(f32 (fceil FR32X:$src)),
3889 (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x2))>;
3890 def : Pat<(f64 (fceil FR64X:$src)),
3891 (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x2))>;
3892 def : Pat<(f32 (frint FR32X:$src)),
3893 (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x4))>;
3894 def : Pat<(f64 (frint FR64X:$src)),
3895 (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x4))>;
3896 def : Pat<(f32 (ftrunc FR32X:$src)),
3897 (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x3))>;
3898 def : Pat<(f64 (ftrunc FR64X:$src)),
3899 (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x3))>;
// Packed 512-bit rounding nodes; same immediate mapping as the scalar
// patterns (0x1 floor, 0xC nearbyint, 0x2 ceil, 0x4 rint, 0x3 trunc).
3901 def : Pat<(v16f32 (ffloor VR512:$src)),
3902 (VRNDSCALEPSZr VR512:$src, (i32 0x1))>;
3903 def : Pat<(v16f32 (fnearbyint VR512:$src)),
3904 (VRNDSCALEPSZr VR512:$src, (i32 0xC))>;
3905 def : Pat<(v16f32 (fceil VR512:$src)),
3906 (VRNDSCALEPSZr VR512:$src, (i32 0x2))>;
3907 def : Pat<(v16f32 (frint VR512:$src)),
3908 (VRNDSCALEPSZr VR512:$src, (i32 0x4))>;
3909 def : Pat<(v16f32 (ftrunc VR512:$src)),
3910 (VRNDSCALEPSZr VR512:$src, (i32 0x3))>;
3912 def : Pat<(v8f64 (ffloor VR512:$src)),
3913 (VRNDSCALEPDZr VR512:$src, (i32 0x1))>;
3914 def : Pat<(v8f64 (fnearbyint VR512:$src)),
3915 (VRNDSCALEPDZr VR512:$src, (i32 0xC))>;
3916 def : Pat<(v8f64 (fceil VR512:$src)),
3917 (VRNDSCALEPDZr VR512:$src, (i32 0x2))>;
3918 def : Pat<(v8f64 (frint VR512:$src)),
3919 (VRNDSCALEPDZr VR512:$src, (i32 0x4))>;
3920 def : Pat<(v8f64 (ftrunc VR512:$src)),
3921 (VRNDSCALEPDZr VR512:$src, (i32 0x3))>;
3923 //-------------------------------------------------
3924 // Integer truncate and extend operations
3925 //-------------------------------------------------
// Down-converting (truncating/saturating) move template: reg->reg plain,
// masked (rrk) and zero-masked (rrkz) forms, plus reg->mem stores (mr/mrk).
// Note MRMDestReg/MRMDestMem: the *source* is the wide VR512 operand.
3927 multiclass avx512_trunc_sat<bits<8> opc, string OpcodeStr,
3928 RegisterClass dstRC, RegisterClass srcRC,
3929 RegisterClass KRC, X86MemOperand x86memop> {
3930 def rr : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
3932 !strconcat(OpcodeStr," \t{$src, $dst|$dst, $src}"),
3935 def rrk : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
3936 (ins KRC:$mask, srcRC:$src),
3937 !strconcat(OpcodeStr,
3938 " \t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
3941 def rrkz : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
3942 (ins KRC:$mask, srcRC:$src),
3943 !strconcat(OpcodeStr,
3944 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
3947 def mr : AVX512XS8I<opc, MRMDestMem, (outs), (ins x86memop:$dst, srcRC:$src),
3948 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
3951 def mrk : AVX512XS8I<opc, MRMDestMem, (outs),
3952 (ins x86memop:$dst, KRC:$mask, srcRC:$src),
3953 !strconcat(OpcodeStr, " \t{$src, $dst {${mask}}|${dst} {${mask}}, $src}"),
// VPMOV[S/US]{QB,QW,QD,DW,DB}: truncate (plain/signed-sat/unsigned-sat)
// from 512-bit integer vectors down to 128/256-bit results.
3957 defm VPMOVQB : avx512_trunc_sat<0x32, "vpmovqb", VR128X, VR512, VK8WM,
3958 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
3959 defm VPMOVSQB : avx512_trunc_sat<0x22, "vpmovsqb", VR128X, VR512, VK8WM,
3960 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
3961 defm VPMOVUSQB : avx512_trunc_sat<0x12, "vpmovusqb", VR128X, VR512, VK8WM,
3962 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
3963 defm VPMOVQW : avx512_trunc_sat<0x34, "vpmovqw", VR128X, VR512, VK8WM,
3964 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
3965 defm VPMOVSQW : avx512_trunc_sat<0x24, "vpmovsqw", VR128X, VR512, VK8WM,
3966 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
3967 defm VPMOVUSQW : avx512_trunc_sat<0x14, "vpmovusqw", VR128X, VR512, VK8WM,
3968 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
3969 defm VPMOVQD : avx512_trunc_sat<0x35, "vpmovqd", VR256X, VR512, VK8WM,
3970 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
3971 defm VPMOVSQD : avx512_trunc_sat<0x25, "vpmovsqd", VR256X, VR512, VK8WM,
3972 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
3973 defm VPMOVUSQD : avx512_trunc_sat<0x15, "vpmovusqd", VR256X, VR512, VK8WM,
3974 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
3975 defm VPMOVDW : avx512_trunc_sat<0x33, "vpmovdw", VR256X, VR512, VK16WM,
3976 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
3977 defm VPMOVSDW : avx512_trunc_sat<0x23, "vpmovsdw", VR256X, VR512, VK16WM,
3978 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
3979 defm VPMOVUSDW : avx512_trunc_sat<0x13, "vpmovusdw", VR256X, VR512, VK16WM,
3980 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
3981 defm VPMOVDB : avx512_trunc_sat<0x31, "vpmovdb", VR128X, VR512, VK16WM,
3982 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
3983 defm VPMOVSDB : avx512_trunc_sat<0x21, "vpmovsdb", VR128X, VR512, VK16WM,
3984 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
3985 defm VPMOVUSDB : avx512_trunc_sat<0x11, "vpmovusdb", VR128X, VR512, VK16WM,
3986 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
// Generic vector-truncate nodes select the plain (non-saturating) forms.
3988 def : Pat<(v16i8 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQBrr VR512:$src)>;
3989 def : Pat<(v8i16 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQWrr VR512:$src)>;
3990 def : Pat<(v16i16 (X86vtrunc (v16i32 VR512:$src))), (VPMOVDWrr VR512:$src)>;
3991 def : Pat<(v16i8 (X86vtrunc (v16i32 VR512:$src))), (VPMOVDBrr VR512:$src)>;
3992 def : Pat<(v8i32 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQDrr VR512:$src)>;
// Masked truncate nodes select the zero-masked register forms.
3994 def : Pat<(v16i8 (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
3995 (VPMOVDBrrkz VK16WM:$mask, VR512:$src)>;
3996 def : Pat<(v16i16 (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
3997 (VPMOVDWrrkz VK16WM:$mask, VR512:$src)>;
3998 def : Pat<(v8i16 (X86vtruncm VK8WM:$mask, (v8i64 VR512:$src))),
3999 (VPMOVQWrrkz VK8WM:$mask, VR512:$src)>;
4000 def : Pat<(v8i32 (X86vtruncm VK8WM:$mask, (v8i64 VR512:$src))),
4001 (VPMOVQDrrkz VK8WM:$mask, VR512:$src)>;
// Integer widening (sign/zero extension) template: plain, masked, and
// zero-masked register forms plus the corresponding memory-load forms.
4004 multiclass avx512_extend<bits<8> opc, string OpcodeStr, RegisterClass KRC,
4005 RegisterClass DstRC, RegisterClass SrcRC, SDNode OpNode,
4006 PatFrag mem_frag, X86MemOperand x86memop,
4007 ValueType OpVT, ValueType InVT> {
4009 def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
4011 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
4012 [(set DstRC:$dst, (OpVT (OpNode (InVT SrcRC:$src))))]>, EVEX;
4014 def rrk : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
4015 (ins KRC:$mask, SrcRC:$src),
4016 !strconcat(OpcodeStr, " \t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
4019 def rrkz : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
4020 (ins KRC:$mask, SrcRC:$src),
4021 !strconcat(OpcodeStr, " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
4024 let mayLoad = 1 in {
// Memory form loads via mem_frag and bitcasts to the input vector type.
4025 def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
4026 (ins x86memop:$src),
4027 !strconcat(OpcodeStr," \t{$src, $dst|$dst, $src}"),
4029 (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))]>,
4032 def rmk : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
4033 (ins KRC:$mask, x86memop:$src),
4034 !strconcat(OpcodeStr," \t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
4038 def rmkz : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
4039 (ins KRC:$mask, x86memop:$src),
4040 !strconcat(OpcodeStr," \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
// 512-bit zero-extend instantiations (X86vzext).
4046 defm VPMOVZXBDZ: avx512_extend<0x31, "vpmovzxbd", VK16WM, VR512, VR128X, X86vzext,
4047 memopv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
4049 defm VPMOVZXBQZ: avx512_extend<0x32, "vpmovzxbq", VK8WM, VR512, VR128X, X86vzext,
4050 memopv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
4052 defm VPMOVZXWDZ: avx512_extend<0x33, "vpmovzxwd", VK16WM, VR512, VR256X, X86vzext,
4053 memopv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
4054 EVEX_CD8<16, CD8VH>;
4055 defm VPMOVZXWQZ: avx512_extend<0x34, "vpmovzxwq", VK8WM, VR512, VR128X, X86vzext,
4056 memopv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
4057 EVEX_CD8<16, CD8VQ>;
4058 defm VPMOVZXDQZ: avx512_extend<0x35, "vpmovzxdq", VK8WM, VR512, VR256X, X86vzext,
4059 memopv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
4060 EVEX_CD8<32, CD8VH>;
// 512-bit sign-extend instantiations (X86vsext).
4062 defm VPMOVSXBDZ: avx512_extend<0x21, "vpmovsxbd", VK16WM, VR512, VR128X, X86vsext,
4063 memopv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
4065 defm VPMOVSXBQZ: avx512_extend<0x22, "vpmovsxbq", VK8WM, VR512, VR128X, X86vsext,
4066 memopv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
4068 defm VPMOVSXWDZ: avx512_extend<0x23, "vpmovsxwd", VK16WM, VR512, VR256X, X86vsext,
4069 memopv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
4070 EVEX_CD8<16, CD8VH>;
4071 defm VPMOVSXWQZ: avx512_extend<0x24, "vpmovsxwq", VK8WM, VR512, VR128X, X86vsext,
4072 memopv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
4073 EVEX_CD8<16, CD8VQ>;
4074 defm VPMOVSXDQZ: avx512_extend<0x25, "vpmovsxdq", VK8WM, VR512, VR256X, X86vsext,
4075 memopv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
4076 EVEX_CD8<32, CD8VH>;
4078 //===----------------------------------------------------------------------===//
4079 // GATHER - SCATTER Operations
// Gather template: destination is tied to $src1 (merge semantics) and the
// mask register is written back ($mask_wb) as elements complete.
4081 multiclass avx512_gather<bits<8> opc, string OpcodeStr, RegisterClass KRC,
4082 RegisterClass RC, X86MemOperand memop> {
4084 Constraints = "@earlyclobber $dst, $src1 = $dst, $mask = $mask_wb" in
4085 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst, KRC:$mask_wb),
4086 (ins RC:$src1, KRC:$mask, memop:$src2),
4087 !strconcat(OpcodeStr,
4088 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
4092 let ExeDomain = SSEPackedDouble in {
// FP gathers: dword-indexed (D) and qword-indexed (Q) variants.
4093 defm VGATHERDPDZ : avx512_gather<0x92, "vgatherdpd", VK8WM, VR512, vy64xmem>,
4094 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4095 defm VGATHERQPDZ : avx512_gather<0x93, "vgatherqpd", VK8WM, VR512, vz64mem>,
4096 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4099 let ExeDomain = SSEPackedSingle in {
4100 defm VGATHERDPSZ : avx512_gather<0x92, "vgatherdps", VK16WM, VR512, vz32mem>,
4101 EVEX_V512, EVEX_CD8<32, CD8VT1>;
4102 defm VGATHERQPSZ : avx512_gather<0x93, "vgatherqps", VK8WM, VR256X, vz64mem>,
4103 EVEX_V512, EVEX_CD8<32, CD8VT1>;
// Integer gathers.
4106 defm VPGATHERDQZ : avx512_gather<0x90, "vpgatherdq", VK8WM, VR512, vy64xmem>,
4107 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4108 defm VPGATHERDDZ : avx512_gather<0x90, "vpgatherdd", VK16WM, VR512, vz32mem>,
4109 EVEX_V512, EVEX_CD8<32, CD8VT1>;
4111 defm VPGATHERQQZ : avx512_gather<0x91, "vpgatherqq", VK8WM, VR512, vz64mem>,
4112 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4113 defm VPGATHERQDZ : avx512_gather<0x91, "vpgatherqd", VK8WM, VR256X, vz64mem>,
4114 EVEX_V512, EVEX_CD8<32, CD8VT1>;
// Scatter template: store form with mask write-back; no tied destination
// register since the result goes to memory.
4116 multiclass avx512_scatter<bits<8> opc, string OpcodeStr, RegisterClass KRC,
4117 RegisterClass RC, X86MemOperand memop> {
4118 let mayStore = 1, Constraints = "$mask = $mask_wb" in
4119 def mr : AVX5128I<opc, MRMDestMem, (outs KRC:$mask_wb),
4120 (ins memop:$dst, KRC:$mask, RC:$src2),
4121 !strconcat(OpcodeStr,
4122 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
4126 let ExeDomain = SSEPackedDouble in {
4127 defm VSCATTERDPDZ : avx512_scatter<0xA2, "vscatterdpd", VK8WM, VR512, vy64xmem>,
4128 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4129 defm VSCATTERQPDZ : avx512_scatter<0xA3, "vscatterqpd", VK8WM, VR512, vz64mem>,
4130 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4133 let ExeDomain = SSEPackedSingle in {
4134 defm VSCATTERDPSZ : avx512_scatter<0xA2, "vscatterdps", VK16WM, VR512, vz32mem>,
4135 EVEX_V512, EVEX_CD8<32, CD8VT1>;
4136 defm VSCATTERQPSZ : avx512_scatter<0xA3, "vscatterqps", VK8WM, VR256X, vz64mem>,
4137 EVEX_V512, EVEX_CD8<32, CD8VT1>;
// Integer scatters.
4140 defm VPSCATTERDQZ : avx512_scatter<0xA0, "vpscatterdq", VK8WM, VR512, vy64xmem>,
4141 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4142 defm VPSCATTERDDZ : avx512_scatter<0xA0, "vpscatterdd", VK16WM, VR512, vz32mem>,
4143 EVEX_V512, EVEX_CD8<32, CD8VT1>;
4145 defm VPSCATTERQQZ : avx512_scatter<0xA1, "vpscatterqq", VK8WM, VR512, vz64mem>,
4146 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4147 defm VPSCATTERQDZ : avx512_scatter<0xA1, "vpscatterqd", VK8WM, VR256X, vz64mem>,
4148 EVEX_V512, EVEX_CD8<32, CD8VT1>;
// Gather/scatter prefetch hints (PFI feature): memory-operand-only forms,
// hasSideEffects = 1 so they are never dead-code eliminated.
4151 multiclass avx512_gather_scatter_prefetch<bits<8> opc, Format F, string OpcodeStr,
4152 RegisterClass KRC, X86MemOperand memop> {
4153 let Predicates = [HasPFI], hasSideEffects = 1 in
4154 def m : AVX5128I<opc, F, (outs), (ins KRC:$mask, memop:$src),
4155 !strconcat(OpcodeStr, " \t{$src {${mask}}|{${mask}}, $src}"),
// Hint level 0 (T0) gather prefetches: ModRM reg field /1 selects the form.
4159 defm VGATHERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dps",
4160 VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
4162 defm VGATHERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qps",
4163 VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
4165 defm VGATHERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dpd",
4166 VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
4168 defm VGATHERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qpd",
4169 VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
// Hint level 1 (T1) gather prefetches: /2.
4171 defm VGATHERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dps",
4172 VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
4174 defm VGATHERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qps",
4175 VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
4177 defm VGATHERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dpd",
4178 VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
4180 defm VGATHERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qpd",
4181 VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
// Scatter prefetches: /5 for hint 0, /6 for hint 1.
4183 defm VSCATTERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dps",
4184 VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
4186 defm VSCATTERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qps",
4187 VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
4189 defm VSCATTERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dpd",
4190 VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
4192 defm VSCATTERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qpd",
4193 VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4195 defm VSCATTERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dps",
4196 VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
4198 defm VSCATTERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qps",
4199 VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
4201 defm VSCATTERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dpd",
4202 VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
4204 defm VSCATTERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qpd",
4205 VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4206 //===----------------------------------------------------------------------===//
4207 // VSHUFPS - VSHUFPD Operations
// SHUFPS/SHUFPD template: reg/reg and reg/mem forms with the i8 shuffle
// control immediate, selected via the X86Shufp node.
4209 multiclass avx512_shufp<RegisterClass RC, X86MemOperand x86memop,
4210 ValueType vt, string OpcodeStr, PatFrag mem_frag,
4212 def rmi : AVX512PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
4213 (ins RC:$src1, x86memop:$src2, i8imm:$src3),
4214 !strconcat(OpcodeStr,
4215 " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4216 [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
4217 (i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
4218 EVEX_4V, Sched<[WriteShuffleLd, ReadAfterLd]>;
4219 def rri : AVX512PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
4220 (ins RC:$src1, RC:$src2, i8imm:$src3),
4221 !strconcat(OpcodeStr,
4222 " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4223 [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
4224 (i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
4225 EVEX_4V, Sched<[WriteShuffle]>;
4228 defm VSHUFPSZ : avx512_shufp<VR512, f512mem, v16f32, "vshufps", memopv16f32,
4229 SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
4230 defm VSHUFPDZ : avx512_shufp<VR512, f512mem, v8f64, "vshufpd", memopv8f64,
4231 SSEPackedDouble>, PD, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
// Integer shuffles reuse the FP encodings (same lane semantics).
4233 def : Pat<(v16i32 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))),
4234 (VSHUFPSZrri VR512:$src1, VR512:$src2, imm:$imm)>;
4235 def : Pat<(v16i32 (X86Shufp VR512:$src1,
4236 (memopv16i32 addr:$src2), (i8 imm:$imm))),
4237 (VSHUFPSZrmi VR512:$src1, addr:$src2, imm:$imm)>;
4239 def : Pat<(v8i64 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))),
4240 (VSHUFPDZrri VR512:$src1, VR512:$src2, imm:$imm)>;
4241 def : Pat<(v8i64 (X86Shufp VR512:$src1,
4242 (memopv8i64 addr:$src2), (i8 imm:$imm))),
4243 (VSHUFPDZrmi VR512:$src1, addr:$src2, imm:$imm)>;
// VALIGND/VALIGNQ template (opcode 0x03): element-granular align with an
// i8 shift count; reg/reg and reg/mem forms.
4245 multiclass avx512_alignr<string OpcodeStr, RegisterClass RC,
4246 X86MemOperand x86memop> {
4247 def rri : AVX512AIi8<0x03, MRMSrcReg, (outs RC:$dst),
4248 (ins RC:$src1, RC:$src2, i8imm:$src3),
4249 !strconcat(OpcodeStr,
4250 " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4253 def rmi : AVX512AIi8<0x03, MRMSrcMem, (outs RC:$dst),
4254 (ins RC:$src1, x86memop:$src2, i8imm:$src3),
4255 !strconcat(OpcodeStr,
4256 " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4259 defm VALIGND : avx512_alignr<"valignd", VR512, i512mem>,
4260 EVEX_V512, EVEX_CD8<32, CD8VF>;
4261 defm VALIGNQ : avx512_alignr<"valignq", VR512, i512mem>,
4262 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
// X86PAlignr operands are deliberately swapped ($src2 first) to match the
// VALIGN source-operand ordering.
4264 def : Pat<(v16f32 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
4265 (VALIGNDrri VR512:$src2, VR512:$src1, imm:$imm)>;
4266 def : Pat<(v8f64 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
4267 (VALIGNQrri VR512:$src2, VR512:$src1, imm:$imm)>;
4268 def : Pat<(v16i32 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
4269 (VALIGNDrri VR512:$src2, VR512:$src1, imm:$imm)>;
4270 def : Pat<(v8i64 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
4271 (VALIGNQrri VR512:$src2, VR512:$src1, imm:$imm)>;
4273 // Helper fragments to match sext vXi1 to vXiY.
// Arithmetic shift right by (elt_bits - 1) broadcasts each element's sign bit.
4274 def v16i1sextv16i32 : PatLeaf<(v16i32 (X86vsrai VR512:$src, (i8 31)))>;
4275 def v8i1sextv8i64 : PatLeaf<(v8i64 (X86vsrai VR512:$src, (i8 63)))>;
// Packed absolute-value template: plain/masked/zero-masked register forms,
// the memory forms, and the embedded-broadcast (rmb*) forms.
4277 multiclass avx512_vpabs<bits<8> opc, string OpcodeStr, ValueType OpVT,
4278 RegisterClass KRC, RegisterClass RC,
4279 X86MemOperand x86memop, X86MemOperand x86scalar_mop,
4281 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
4282 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
4284 def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
4285 !strconcat(OpcodeStr, " \t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
4287 def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
4288 !strconcat(OpcodeStr,
4289 " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
4291 let mayLoad = 1 in {
4292 def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4293 (ins x86memop:$src),
4294 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
4296 def rmk : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4297 (ins KRC:$mask, x86memop:$src),
4298 !strconcat(OpcodeStr,
4299 " \t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
4301 def rmkz : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4302 (ins KRC:$mask, x86memop:$src),
4303 !strconcat(OpcodeStr,
4304 " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
// Broadcast-from-scalar-memory forms; BrdcstStr is "{1toN}".
4306 def rmb : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4307 (ins x86scalar_mop:$src),
4308 !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
4309 ", $dst|$dst, ${src}", BrdcstStr, "}"),
4311 def rmbk : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4312 (ins KRC:$mask, x86scalar_mop:$src),
4313 !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
4314 ", $dst {${mask}}|$dst {${mask}}, ${src}", BrdcstStr, "}"),
4315 []>, EVEX, EVEX_B, EVEX_K;
4316 def rmbkz : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4317 (ins KRC:$mask, x86scalar_mop:$src),
4318 !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
4319 ", $dst {${mask}} {z}|$dst {${mask}} {z}, ${src}",
4321 []>, EVEX, EVEX_B, EVEX_KZ;
4325 defm VPABSDZ : avx512_vpabs<0x1E, "vpabsd", v16i32, VK16WM, VR512,
4326 i512mem, i32mem, "{1to16}">, EVEX_V512,
4327 EVEX_CD8<32, CD8VF>;
4328 defm VPABSQZ : avx512_vpabs<0x1F, "vpabsq", v8i64, VK8WM, VR512,
4329 i512mem, i64mem, "{1to8}">, EVEX_V512, VEX_W,
4330 EVEX_CD8<64, CD8VF>;
// Match the (x ^ signmask) - signmask abs idiom onto VPABS.
4333 (bc_v16i32 (v16i1sextv16i32)),
4334 (bc_v16i32 (add (v16i32 VR512:$src), (v16i1sextv16i32)))),
4335 (VPABSDZrr VR512:$src)>;
4337 (bc_v8i64 (v8i1sextv8i64)),
4338 (bc_v8i64 (add (v8i64 VR512:$src), (v8i1sextv8i64)))),
4339 (VPABSQZrr VR512:$src)>;
// Unmasked (all-ones mask, zero pass-through) intrinsic calls select the
// plain register form.
4341 def : Pat<(v16i32 (int_x86_avx512_mask_pabs_d_512 (v16i32 VR512:$src),
4342 (v16i32 immAllZerosV), (i16 -1))),
4343 (VPABSDZrr VR512:$src)>;
4344 def : Pat<(v8i64 (int_x86_avx512_mask_pabs_q_512 (v8i64 VR512:$src),
4345 (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
4346 (VPABSQZrr VR512:$src)>;
// CDI-style unary op template (shared by VPCONFLICT and VPLZCNT): plain,
// broadcast, zero-masked, and merge-masked ($src1 tied to $dst) forms.
4348 multiclass avx512_conflict<bits<8> opc, string OpcodeStr,
4349 RegisterClass RC, RegisterClass KRC,
4350 X86MemOperand x86memop,
4351 X86MemOperand x86scalar_mop, string BrdcstStr> {
4352 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
4354 !strconcat(OpcodeStr, " \t{$src, ${dst} |${dst}, $src}"),
4356 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4357 (ins x86memop:$src),
4358 !strconcat(OpcodeStr, " \t{$src, ${dst}|${dst}, $src}"),
4360 def rmb : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4361 (ins x86scalar_mop:$src),
4362 !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
4363 ", ${dst}|${dst}, ${src}", BrdcstStr, "}"),
4365 def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
4366 (ins KRC:$mask, RC:$src),
4367 !strconcat(OpcodeStr,
4368 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
4370 def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4371 (ins KRC:$mask, x86memop:$src),
4372 !strconcat(OpcodeStr,
4373 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
4375 def rmbkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4376 (ins KRC:$mask, x86scalar_mop:$src),
4377 !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
4378 ", ${dst} {${mask}} {z}|${dst} {${mask}} {z}, ${src}",
4380 []>, EVEX, EVEX_KZ, EVEX_B;
// Merge-masked forms: $src1 carries the pass-through value.
4382 let Constraints = "$src1 = $dst" in {
4383 def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
4384 (ins RC:$src1, KRC:$mask, RC:$src2),
4385 !strconcat(OpcodeStr,
4386 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
4388 def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4389 (ins RC:$src1, KRC:$mask, x86memop:$src2),
4390 !strconcat(OpcodeStr,
4391 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
4393 def rmbk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4394 (ins RC:$src1, KRC:$mask, x86scalar_mop:$src2),
4395 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
4396 ", ${dst} {${mask}}|${dst} {${mask}}, ${src2}", BrdcstStr, "}"),
4397 []>, EVEX, EVEX_K, EVEX_B;
4401 let Predicates = [HasCDI] in {
4402 defm VPCONFLICTD : avx512_conflict<0xC4, "vpconflictd", VR512, VK16WM,
4403 i512mem, i32mem, "{1to16}">,
4404 EVEX_V512, EVEX_CD8<32, CD8VF>;
4407 defm VPCONFLICTQ : avx512_conflict<0xC4, "vpconflictq", VR512, VK8WM,
4408 i512mem, i64mem, "{1to8}">,
4409 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// Masked conflict intrinsics: GR mask is copied into the matching VK class.
4413 def : Pat<(int_x86_avx512_mask_conflict_d_512 VR512:$src2, VR512:$src1,
4415 (VPCONFLICTDrrk VR512:$src1,
4416 (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), VR512:$src2)>;
4418 def : Pat<(int_x86_avx512_mask_conflict_q_512 VR512:$src2, VR512:$src1,
4420 (VPCONFLICTQrrk VR512:$src1,
4421 (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), VR512:$src2)>;
// VPLZCNT reuses the same template (opcode 0x44).
4423 let Predicates = [HasCDI] in {
4424 defm VPLZCNTD : avx512_conflict<0x44, "vplzcntd", VR512, VK16WM,
4425 i512mem, i32mem, "{1to16}">,
4426 EVEX_V512, EVEX_CD8<32, CD8VF>;
4429 defm VPLZCNTQ : avx512_conflict<0x44, "vplzcntq", VR512, VK8WM,
4430 i512mem, i64mem, "{1to8}">,
4431 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
4435 def : Pat<(int_x86_avx512_mask_lzcnt_d_512 VR512:$src2, VR512:$src1,
4437 (VPLZCNTDrrk VR512:$src1,
4438 (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), VR512:$src2)>;
4440 def : Pat<(int_x86_avx512_mask_lzcnt_q_512 VR512:$src2, VR512:$src1,
4442 (VPLZCNTQrrk VR512:$src1,
4443 (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), VR512:$src2)>;
// Generic ctlz nodes select VPLZCNT (load and register forms).
4445 def : Pat<(v16i32 (ctlz (memopv16i32 addr:$src))),
4446 (VPLZCNTDrm addr:$src)>;
4447 def : Pat<(v16i32 (ctlz (v16i32 VR512:$src))),
4448 (VPLZCNTDrr VR512:$src)>;
4449 def : Pat<(v8i64 (ctlz (memopv8i64 addr:$src))),
4450 (VPLZCNTQrm addr:$src)>;
4451 def : Pat<(v8i64 (ctlz (v8i64 VR512:$src))),
4452 (VPLZCNTQrr VR512:$src)>;
// Constant i1 stores become byte stores; i1 true (-1) is stored as byte 1,
// matching the i1 memory representation used by the loads elsewhere.
4454 def : Pat<(store (i1 -1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
4455 def : Pat<(store (i1 1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
4456 def : Pat<(store (i1 0), addr:$dst), (MOV8mi addr:$dst, (i8 0))>;
// Storing a mask-register i1 spills the whole VK16 via KMOVW.
4458 def : Pat<(store VK1:$src, addr:$dst),
4459 (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK1:$src, VK16))>;
// PatFrag matching truncating stores whose memory type is exactly i1.
4461 def truncstorei1 : PatFrag<(ops node:$val, node:$ptr),
4462 (truncstore node:$val, node:$ptr), [{
4463 return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
4466 def : Pat<(truncstorei1 GR8:$src, addr:$dst),
4467 (MOV8mr addr:$dst, GR8:$src)>;