// NOTE(review): the embedded numbering of this listing has gaps (e.g. 4, 13,
// 20, 23, 28, 30, 32-34), so parts of this multiclass — the OpcodeStr
// parameter, trailing EVEX_K annotations and closing braces — appear to be
// missing from this copy. Confirm against the full file before editing.
1 // Common base class of AVX512_masking and AVX512_masking_3src.
// Emits three records for one instruction: the plain form (NAME), the
// merge-masking form (NAME#k, only $mask-selected elements are written) and
// the zero-masking form (NAME#kz, unselected elements are zeroed).
2 multiclass AVX512_masking_common<bits<8> O, Format F, dag Outs, dag Ins,
3 dag MaskingIns, dag ZeroMaskingIns,
5 string AttSrcAsm, string IntelSrcAsm,
6 dag RHS, dag MaskingRHS, ValueType OpVT,
7 RegisterClass RC, RegisterClass KRC,
8 string MaskingConstraint = ""> {
// Unmasked variant.
9 def NAME: AVX512<O, F, Outs, Ins,
10 OpcodeStr#" \t{"#AttSrcAsm#", $dst|"#
11 "$dst, "#IntelSrcAsm#"}",
12 [(set RC:$dst, RHS)]>;
14 // Prefer over VMOV*rrk Pat<>
15 let AddedComplexity = 20 in
16 def NAME#k: AVX512<O, F, Outs, MaskingIns,
17 OpcodeStr#" \t{"#AttSrcAsm#", $dst {${mask}}|"#
18 "$dst {${mask}}, "#IntelSrcAsm#"}",
19 [(set RC:$dst, MaskingRHS)]>,
21 // In case of the 3src subclass this is overridden with a let.
22 string Constraints = MaskingConstraint;
24 let AddedComplexity = 30 in // Prefer over VMOV*rrkz Pat<>
25 def NAME#kz: AVX512<O, F, Outs, ZeroMaskingIns,
26 OpcodeStr#" \t{"#AttSrcAsm#", $dst {${mask}} {z}|"#
27 "$dst {${mask}} {z}, "#IntelSrcAsm#"}",
29 (vselect KRC:$mask, RHS,
31 (v16i32 immAllZerosV)))))]>,
35 // This multiclass generates the unconditional/non-masking, the masking and
36 // the zero-masking variant of the instruction. In the masking case, the
// (typo fixed below: "perserved" -> "preserved")
// preserved vector elements come from a new dummy input operand tied to $dst.
// NOTE(review): lines 39, 42, 44 and 49-50 of the original are absent from
// this listing (OpcodeStr/KRC parameters and the trailing MaskingConstraint
// argument) — confirm against the full file.
38 multiclass AVX512_masking<bits<8> O, Format F, dag Outs, dag Ins,
40 string AttSrcAsm, string IntelSrcAsm,
41 dag RHS, ValueType OpVT, RegisterClass RC,
43 AVX512_masking_common<O, F, Outs,
// Masking variant adds a tied $src0 (the preserved value) plus the mask.
45 !con((ins RC:$src0, KRC:$mask), Ins),
// Zero-masking variant only adds the mask; zeros come from immAllZerosV.
46 !con((ins KRC:$mask), Ins),
47 OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
48 (vselect KRC:$mask, RHS, RC:$src0), OpVT, RC, KRC,
51 // Similar to AVX512_masking but in this case one of the source operands
52 // ($src1) is already tied to $dst so we just use that for the preserved
53 // vector elements. NOTE that the NonTiedIns (the ins dag) should exclude
// NOTE(review): line 54 (end of this comment, presumably "$src1") and lines
// 56, 63, 65 (OpcodeStr parameter and the NonTiedIns continuations of the
// !con dags) are absent from this listing — confirm against the full file.
55 multiclass AVX512_masking_3src<bits<8> O, Format F, dag Outs, dag NonTiedIns,
57 string AttSrcAsm, string IntelSrcAsm,
58 dag RHS, ValueType OpVT,
59 RegisterClass RC, RegisterClass KRC> :
60 AVX512_masking_common<O, F, Outs,
// $src1 is prepended to every ins dag so it can be tied to $dst.
61 !con((ins RC:$src1), NonTiedIns),
62 !con((ins RC:$src1), !con((ins KRC:$mask),
64 !con((ins RC:$src1), !con((ins KRC:$mask),
66 OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
// Merge-masking preserves the tied $src1 elements instead of a dummy $src0.
67 (vselect KRC:$mask, RHS, RC:$src1), OpVT, RC, KRC>;
69 // Bitcasts between 512-bit vector types. Return the original type since
70 // no instruction is needed for the conversion
// One pattern per (dst type, src type) pair over the five 512-bit vector
// types; each simply rewrites the value in the destination type.
71 let Predicates = [HasAVX512] in {
72 def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
73 def : Pat<(v8f64 (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
74 def : Pat<(v8f64 (bitconvert (v32i16 VR512:$src))), (v8f64 VR512:$src)>;
75 def : Pat<(v8f64 (bitconvert (v64i8 VR512:$src))), (v8f64 VR512:$src)>;
76 def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
77 def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))), (v16f32 VR512:$src)>;
78 def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
79 def : Pat<(v16f32 (bitconvert (v32i16 VR512:$src))), (v16f32 VR512:$src)>;
80 def : Pat<(v16f32 (bitconvert (v64i8 VR512:$src))), (v16f32 VR512:$src)>;
81 def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))), (v16f32 VR512:$src)>;
82 def : Pat<(v8i64 (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
83 def : Pat<(v8i64 (bitconvert (v32i16 VR512:$src))), (v8i64 VR512:$src)>;
84 def : Pat<(v8i64 (bitconvert (v64i8 VR512:$src))), (v8i64 VR512:$src)>;
85 def : Pat<(v8i64 (bitconvert (v8f64 VR512:$src))), (v8i64 VR512:$src)>;
86 def : Pat<(v8i64 (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
87 def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))), (v16i32 VR512:$src)>;
88 def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
89 def : Pat<(v16i32 (bitconvert (v32i16 VR512:$src))), (v16i32 VR512:$src)>;
90 def : Pat<(v16i32 (bitconvert (v64i8 VR512:$src))), (v16i32 VR512:$src)>;
91 def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))), (v16i32 VR512:$src)>;
92 def : Pat<(v32i16 (bitconvert (v8i64 VR512:$src))), (v32i16 VR512:$src)>;
93 def : Pat<(v32i16 (bitconvert (v16i32 VR512:$src))), (v32i16 VR512:$src)>;
94 def : Pat<(v32i16 (bitconvert (v64i8 VR512:$src))), (v32i16 VR512:$src)>;
95 def : Pat<(v32i16 (bitconvert (v8f64 VR512:$src))), (v32i16 VR512:$src)>;
96 def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
// (fix: removed a verbatim duplicate of the v32i16<-v16f32 pattern that
// followed the line above; every source type is still covered exactly once.)
98 def : Pat<(v64i8 (bitconvert (v8i64 VR512:$src))), (v64i8 VR512:$src)>;
99 def : Pat<(v64i8 (bitconvert (v16i32 VR512:$src))), (v64i8 VR512:$src)>;
100 def : Pat<(v64i8 (bitconvert (v32i16 VR512:$src))), (v64i8 VR512:$src)>;
101 def : Pat<(v64i8 (bitconvert (v8f64 VR512:$src))), (v64i8 VR512:$src)>;
102 def : Pat<(v64i8 (bitconvert (v16f32 VR512:$src))), (v64i8 VR512:$src)>;
// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion. (Header reconstructed; the
// original comment line 103 is absent from this listing.)
104 def : Pat<(v2i64 (bitconvert (v4i32 VR128X:$src))), (v2i64 VR128X:$src)>;
105 def : Pat<(v2i64 (bitconvert (v8i16 VR128X:$src))), (v2i64 VR128X:$src)>;
106 def : Pat<(v2i64 (bitconvert (v16i8 VR128X:$src))), (v2i64 VR128X:$src)>;
107 def : Pat<(v2i64 (bitconvert (v2f64 VR128X:$src))), (v2i64 VR128X:$src)>;
108 def : Pat<(v2i64 (bitconvert (v4f32 VR128X:$src))), (v2i64 VR128X:$src)>;
109 def : Pat<(v4i32 (bitconvert (v2i64 VR128X:$src))), (v4i32 VR128X:$src)>;
110 def : Pat<(v4i32 (bitconvert (v8i16 VR128X:$src))), (v4i32 VR128X:$src)>;
111 def : Pat<(v4i32 (bitconvert (v16i8 VR128X:$src))), (v4i32 VR128X:$src)>;
112 def : Pat<(v4i32 (bitconvert (v2f64 VR128X:$src))), (v4i32 VR128X:$src)>;
113 def : Pat<(v4i32 (bitconvert (v4f32 VR128X:$src))), (v4i32 VR128X:$src)>;
114 def : Pat<(v8i16 (bitconvert (v2i64 VR128X:$src))), (v8i16 VR128X:$src)>;
115 def : Pat<(v8i16 (bitconvert (v4i32 VR128X:$src))), (v8i16 VR128X:$src)>;
116 def : Pat<(v8i16 (bitconvert (v16i8 VR128X:$src))), (v8i16 VR128X:$src)>;
117 def : Pat<(v8i16 (bitconvert (v2f64 VR128X:$src))), (v8i16 VR128X:$src)>;
118 def : Pat<(v8i16 (bitconvert (v4f32 VR128X:$src))), (v8i16 VR128X:$src)>;
119 def : Pat<(v16i8 (bitconvert (v2i64 VR128X:$src))), (v16i8 VR128X:$src)>;
120 def : Pat<(v16i8 (bitconvert (v4i32 VR128X:$src))), (v16i8 VR128X:$src)>;
121 def : Pat<(v16i8 (bitconvert (v8i16 VR128X:$src))), (v16i8 VR128X:$src)>;
122 def : Pat<(v16i8 (bitconvert (v2f64 VR128X:$src))), (v16i8 VR128X:$src)>;
123 def : Pat<(v16i8 (bitconvert (v4f32 VR128X:$src))), (v16i8 VR128X:$src)>;
124 def : Pat<(v4f32 (bitconvert (v2i64 VR128X:$src))), (v4f32 VR128X:$src)>;
125 def : Pat<(v4f32 (bitconvert (v4i32 VR128X:$src))), (v4f32 VR128X:$src)>;
126 def : Pat<(v4f32 (bitconvert (v8i16 VR128X:$src))), (v4f32 VR128X:$src)>;
127 def : Pat<(v4f32 (bitconvert (v16i8 VR128X:$src))), (v4f32 VR128X:$src)>;
128 def : Pat<(v4f32 (bitconvert (v2f64 VR128X:$src))), (v4f32 VR128X:$src)>;
129 def : Pat<(v2f64 (bitconvert (v2i64 VR128X:$src))), (v2f64 VR128X:$src)>;
130 def : Pat<(v2f64 (bitconvert (v4i32 VR128X:$src))), (v2f64 VR128X:$src)>;
131 def : Pat<(v2f64 (bitconvert (v8i16 VR128X:$src))), (v2f64 VR128X:$src)>;
132 def : Pat<(v2f64 (bitconvert (v16i8 VR128X:$src))), (v2f64 VR128X:$src)>;
133 def : Pat<(v2f64 (bitconvert (v4f32 VR128X:$src))), (v2f64 VR128X:$src)>;
135 // Bitcasts between 256-bit vector types. Return the original type since
136 // no instruction is needed for the conversion
137 def : Pat<(v4f64 (bitconvert (v8f32 VR256X:$src))), (v4f64 VR256X:$src)>;
138 def : Pat<(v4f64 (bitconvert (v8i32 VR256X:$src))), (v4f64 VR256X:$src)>;
139 def : Pat<(v4f64 (bitconvert (v4i64 VR256X:$src))), (v4f64 VR256X:$src)>;
140 def : Pat<(v4f64 (bitconvert (v16i16 VR256X:$src))), (v4f64 VR256X:$src)>;
141 def : Pat<(v4f64 (bitconvert (v32i8 VR256X:$src))), (v4f64 VR256X:$src)>;
142 def : Pat<(v8f32 (bitconvert (v8i32 VR256X:$src))), (v8f32 VR256X:$src)>;
143 def : Pat<(v8f32 (bitconvert (v4i64 VR256X:$src))), (v8f32 VR256X:$src)>;
144 def : Pat<(v8f32 (bitconvert (v4f64 VR256X:$src))), (v8f32 VR256X:$src)>;
145 def : Pat<(v8f32 (bitconvert (v32i8 VR256X:$src))), (v8f32 VR256X:$src)>;
146 def : Pat<(v8f32 (bitconvert (v16i16 VR256X:$src))), (v8f32 VR256X:$src)>;
147 def : Pat<(v4i64 (bitconvert (v8f32 VR256X:$src))), (v4i64 VR256X:$src)>;
148 def : Pat<(v4i64 (bitconvert (v8i32 VR256X:$src))), (v4i64 VR256X:$src)>;
149 def : Pat<(v4i64 (bitconvert (v4f64 VR256X:$src))), (v4i64 VR256X:$src)>;
150 def : Pat<(v4i64 (bitconvert (v32i8 VR256X:$src))), (v4i64 VR256X:$src)>;
151 def : Pat<(v4i64 (bitconvert (v16i16 VR256X:$src))), (v4i64 VR256X:$src)>;
152 def : Pat<(v32i8 (bitconvert (v4f64 VR256X:$src))), (v32i8 VR256X:$src)>;
153 def : Pat<(v32i8 (bitconvert (v4i64 VR256X:$src))), (v32i8 VR256X:$src)>;
154 def : Pat<(v32i8 (bitconvert (v8f32 VR256X:$src))), (v32i8 VR256X:$src)>;
155 def : Pat<(v32i8 (bitconvert (v8i32 VR256X:$src))), (v32i8 VR256X:$src)>;
156 def : Pat<(v32i8 (bitconvert (v16i16 VR256X:$src))), (v32i8 VR256X:$src)>;
157 def : Pat<(v8i32 (bitconvert (v32i8 VR256X:$src))), (v8i32 VR256X:$src)>;
158 def : Pat<(v8i32 (bitconvert (v16i16 VR256X:$src))), (v8i32 VR256X:$src)>;
159 def : Pat<(v8i32 (bitconvert (v8f32 VR256X:$src))), (v8i32 VR256X:$src)>;
160 def : Pat<(v8i32 (bitconvert (v4i64 VR256X:$src))), (v8i32 VR256X:$src)>;
161 def : Pat<(v8i32 (bitconvert (v4f64 VR256X:$src))), (v8i32 VR256X:$src)>;
162 def : Pat<(v16i16 (bitconvert (v8f32 VR256X:$src))), (v16i16 VR256X:$src)>;
163 def : Pat<(v16i16 (bitconvert (v8i32 VR256X:$src))), (v16i16 VR256X:$src)>;
164 def : Pat<(v16i16 (bitconvert (v4i64 VR256X:$src))), (v16i16 VR256X:$src)>;
165 def : Pat<(v16i16 (bitconvert (v4f64 VR256X:$src))), (v16i16 VR256X:$src)>;
166 def : Pat<(v16i16 (bitconvert (v32i8 VR256X:$src))), (v16i16 VR256X:$src)>;
170 // AVX-512: VPXOR instruction writes zero to its upper part, it's safe build zeros.
// Pseudo producing a 512-bit all-zeros register; marked rematerializable and
// as cheap as a move so the allocator can regenerate it freely.
// NOTE(review): lines 171-172, 177-178 and 183-184 (including the closing
// braces of both let blocks) are absent from this listing.
173 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
174 isPseudo = 1, Predicates = [HasAVX512] in {
175 def AVX512_512_SET0 : I<0, Pseudo, (outs VR512:$dst), (ins), "",
176 [(set VR512:$dst, (v16f32 immAllZerosV))]>;
// Map all-zeros constants of the other 512-bit types onto the same pseudo.
179 let Predicates = [HasAVX512] in {
180 def : Pat<(v8i64 immAllZerosV), (AVX512_512_SET0)>;
181 def : Pat<(v16i32 immAllZerosV), (AVX512_512_SET0)>;
182 def : Pat<(v8f64 immAllZerosV), (AVX512_512_SET0)>;
185 //===----------------------------------------------------------------------===//
186 // AVX-512 - VECTOR INSERT
// Register/memory forms of VINSERT{F,I}{32x4,64x4}: insert a 128- or 256-bit
// subvector into a 512-bit register at the immediate-selected position.
// No ISel patterns here ([]); selection is done via the Pat<>s below.
// NOTE(review): several closing braces and "-- 32x4 fp form --"-style section
// comments are absent from this listing (gaps at lines 187-188, 194, 199-200,
// 207, 212, 219, 224-225, 227, 232, 237-238).
189 let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
190 def VINSERTF32x4rr : AVX512AIi8<0x18, MRMSrcReg, (outs VR512:$dst),
191 (ins VR512:$src1, VR128X:$src2, i8imm:$src3),
192 "vinsertf32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
193 []>, EVEX_4V, EVEX_V512;
195 def VINSERTF32x4rm : AVX512AIi8<0x18, MRMSrcMem, (outs VR512:$dst),
196 (ins VR512:$src1, f128mem:$src2, i8imm:$src3),
197 "vinsertf32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
198 []>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VT4>;
201 // -- 64x4 fp form --
202 let hasSideEffects = 0, ExeDomain = SSEPackedDouble in {
203 def VINSERTF64x4rr : AVX512AIi8<0x1a, MRMSrcReg, (outs VR512:$dst),
204 (ins VR512:$src1, VR256X:$src2, i8imm:$src3),
205 "vinsertf64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
206 []>, EVEX_4V, EVEX_V512, VEX_W;
208 def VINSERTF64x4rm : AVX512AIi8<0x1a, MRMSrcMem, (outs VR512:$dst),
209 (ins VR512:$src1, i256mem:$src2, i8imm:$src3),
210 "vinsertf64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
211 []>, EVEX_4V, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
213 // -- 32x4 integer form --
214 let hasSideEffects = 0 in {
215 def VINSERTI32x4rr : AVX512AIi8<0x38, MRMSrcReg, (outs VR512:$dst),
216 (ins VR512:$src1, VR128X:$src2, i8imm:$src3),
217 "vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
218 []>, EVEX_4V, EVEX_V512;
220 def VINSERTI32x4rm : AVX512AIi8<0x38, MRMSrcMem, (outs VR512:$dst),
221 (ins VR512:$src1, i128mem:$src2, i8imm:$src3),
222 "vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
223 []>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VT4>;
// -- 64x4 integer form --
226 let hasSideEffects = 0 in {
228 def VINSERTI64x4rr : AVX512AIi8<0x3a, MRMSrcReg, (outs VR512:$dst),
229 (ins VR512:$src1, VR256X:$src2, i8imm:$src3),
230 "vinserti64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
231 []>, EVEX_4V, EVEX_V512, VEX_W;
233 def VINSERTI64x4rm : AVX512AIi8<0x3a, MRMSrcMem, (outs VR512:$dst),
234 (ins VR512:$src1, i256mem:$src2, i8imm:$src3),
235 "vinserti64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
236 []>, EVEX_4V, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
// Select 128-bit subvector inserts. The 64-bit-element register inserts are
// lowered via the 32x4 instructions (AVX512F has no 64x2 insert): bit
// pattern is identical, only the ExeDomain differs.
239 def : Pat<(vinsert128_insert:$ins (v16f32 VR512:$src1), (v4f32 VR128X:$src2),
240 (iPTR imm)), (VINSERTF32x4rr VR512:$src1, VR128X:$src2,
241 (INSERT_get_vinsert128_imm VR512:$ins))>;
242 def : Pat<(vinsert128_insert:$ins (v8f64 VR512:$src1), (v2f64 VR128X:$src2),
243 (iPTR imm)), (VINSERTF32x4rr VR512:$src1, VR128X:$src2,
244 (INSERT_get_vinsert128_imm VR512:$ins))>;
245 def : Pat<(vinsert128_insert:$ins (v8i64 VR512:$src1), (v2i64 VR128X:$src2),
246 (iPTR imm)), (VINSERTI32x4rr VR512:$src1, VR128X:$src2,
247 (INSERT_get_vinsert128_imm VR512:$ins))>;
248 def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1), (v4i32 VR128X:$src2),
249 (iPTR imm)), (VINSERTI32x4rr VR512:$src1, VR128X:$src2,
250 (INSERT_get_vinsert128_imm VR512:$ins))>;
// Memory-operand forms: fold the load of the inserted subvector.
252 def : Pat<(vinsert128_insert:$ins (v16f32 VR512:$src1), (loadv4f32 addr:$src2),
253 (iPTR imm)), (VINSERTF32x4rm VR512:$src1, addr:$src2,
254 (INSERT_get_vinsert128_imm VR512:$ins))>;
255 def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1),
256 (bc_v4i32 (loadv2i64 addr:$src2)),
257 (iPTR imm)), (VINSERTI32x4rm VR512:$src1, addr:$src2,
258 (INSERT_get_vinsert128_imm VR512:$ins))>;
259 def : Pat<(vinsert128_insert:$ins (v8f64 VR512:$src1), (loadv2f64 addr:$src2),
260 (iPTR imm)), (VINSERTF32x4rm VR512:$src1, addr:$src2,
261 (INSERT_get_vinsert128_imm VR512:$ins))>;
262 def : Pat<(vinsert128_insert:$ins (v8i64 VR512:$src1), (loadv2i64 addr:$src2),
263 (iPTR imm)), (VINSERTI32x4rm VR512:$src1, addr:$src2,
264 (INSERT_get_vinsert128_imm VR512:$ins))>;
// Select 256-bit subvector inserts via VINSERT{F,I}64x4.
266 def : Pat<(vinsert256_insert:$ins (v16f32 VR512:$src1), (v8f32 VR256X:$src2),
267 (iPTR imm)), (VINSERTF64x4rr VR512:$src1, VR256X:$src2,
268 (INSERT_get_vinsert256_imm VR512:$ins))>;
269 def : Pat<(vinsert256_insert:$ins (v8f64 VR512:$src1), (v4f64 VR256X:$src2),
270 (iPTR imm)), (VINSERTF64x4rr VR512:$src1, VR256X:$src2,
271 (INSERT_get_vinsert256_imm VR512:$ins))>;
// (fix: the next two patterns used vinsert128_insert, although the inserted
// operand is 256-bit and the immediate is computed with
// INSERT_get_vinsert256_imm like every other pattern in this group.)
272 def : Pat<(vinsert256_insert:$ins (v8i64 VR512:$src1), (v4i64 VR256X:$src2),
273 (iPTR imm)), (VINSERTI64x4rr VR512:$src1, VR256X:$src2,
274 (INSERT_get_vinsert256_imm VR512:$ins))>;
275 def : Pat<(vinsert256_insert:$ins (v16i32 VR512:$src1), (v8i32 VR256X:$src2),
276 (iPTR imm)), (VINSERTI64x4rr VR512:$src1, VR256X:$src2,
277 (INSERT_get_vinsert256_imm VR512:$ins))>;
// Memory-operand forms: fold the load of the inserted subvector.
279 def : Pat<(vinsert256_insert:$ins (v16f32 VR512:$src1), (loadv8f32 addr:$src2),
280 (iPTR imm)), (VINSERTF64x4rm VR512:$src1, addr:$src2,
281 (INSERT_get_vinsert256_imm VR512:$ins))>;
282 def : Pat<(vinsert256_insert:$ins (v8f64 VR512:$src1), (loadv4f64 addr:$src2),
283 (iPTR imm)), (VINSERTF64x4rm VR512:$src1, addr:$src2,
284 (INSERT_get_vinsert256_imm VR512:$ins))>;
285 def : Pat<(vinsert256_insert:$ins (v8i64 VR512:$src1), (loadv4i64 addr:$src2),
286 (iPTR imm)), (VINSERTI64x4rm VR512:$src1, addr:$src2,
287 (INSERT_get_vinsert256_imm VR512:$ins))>;
288 def : Pat<(vinsert256_insert:$ins (v16i32 VR512:$src1),
289 (bc_v8i32 (loadv4i64 addr:$src2)),
290 (iPTR imm)), (VINSERTI64x4rm VR512:$src1, addr:$src2,
291 (INSERT_get_vinsert256_imm VR512:$ins))>;
293 // vinsertps - insert f32 to XMM
// Register and load-folding forms; the load form builds the v4f32 operand
// from a scalar f32 load via scalar_to_vector.
// NOTE(review): line 298 (the trailing annotation of VINSERTPSzrr,
// presumably "EVEX_4V;") is absent from this listing.
294 def VINSERTPSzrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
295 (ins VR128X:$src1, VR128X:$src2, u32u8imm:$src3),
296 "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
297 [(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
299 def VINSERTPSzrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
300 (ins VR128X:$src1, f32mem:$src2, u32u8imm:$src3),
301 "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
302 [(set VR128X:$dst, (X86insertps VR128X:$src1,
303 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
304 imm:$src3))]>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
306 //===----------------------------------------------------------------------===//
307 // AVX-512 VECTOR EXTRACT
// Register and store forms of VEXTRACT{F,I}{32x4,64x4}: extract a 128- or
// 256-bit subvector selected by the immediate. No ISel patterns here ([]);
// selection is done via the Pat<>s below.
// NOTE(review): closing braces / section comments at lines 308, 310,
// 319-320, 325, 330-331, 333, 342-343, 348, 353 are absent from this listing.
309 let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
311 def VEXTRACTF32x4rr : AVX512AIi8<0x19, MRMDestReg, (outs VR128X:$dst),
312 (ins VR512:$src1, i8imm:$src2),
313 "vextractf32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
314 []>, EVEX, EVEX_V512;
315 def VEXTRACTF32x4mr : AVX512AIi8<0x19, MRMDestMem, (outs),
316 (ins f128mem:$dst, VR512:$src1, i8imm:$src2),
317 "vextractf32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
318 []>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VT4>;
321 def VEXTRACTF64x4rr : AVX512AIi8<0x1b, MRMDestReg, (outs VR256X:$dst),
322 (ins VR512:$src1, i8imm:$src2),
323 "vextractf64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
324 []>, EVEX, EVEX_V512, VEX_W;
326 def VEXTRACTF64x4mr : AVX512AIi8<0x1b, MRMDestMem, (outs),
327 (ins f256mem:$dst, VR512:$src1, i8imm:$src2),
328 "vextractf64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
329 []>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
332 let hasSideEffects = 0 in {
334 def VEXTRACTI32x4rr : AVX512AIi8<0x39, MRMDestReg, (outs VR128X:$dst),
335 (ins VR512:$src1, i8imm:$src2),
336 "vextracti32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
337 []>, EVEX, EVEX_V512;
338 def VEXTRACTI32x4mr : AVX512AIi8<0x39, MRMDestMem, (outs),
339 (ins i128mem:$dst, VR512:$src1, i8imm:$src2),
340 "vextracti32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
341 []>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VT4>;
344 def VEXTRACTI64x4rr : AVX512AIi8<0x3b, MRMDestReg, (outs VR256X:$dst),
345 (ins VR512:$src1, i8imm:$src2),
346 "vextracti64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
347 []>, EVEX, EVEX_V512, VEX_W;
349 def VEXTRACTI64x4mr : AVX512AIi8<0x3b, MRMDestMem, (outs),
350 (ins i256mem:$dst, VR512:$src1, i8imm:$src2),
351 "vextracti64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
352 []>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
// Select 128-bit subvector extracts. The 64-bit-element extracts reuse the
// 32x4 instructions (AVX512F has no 64x2 extract); integer types go through
// the integer-domain VEXTRACTI32x4rr, float types through VEXTRACTF32x4rr.
355 def : Pat<(vextract128_extract:$ext (v16f32 VR512:$src1), (iPTR imm)),
356 (v4f32 (VEXTRACTF32x4rr VR512:$src1,
357 (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
// (fix: this pattern was missing the explicit (v16i32 ...) source type that
// all sibling patterns spell out, and selected the float-domain
// VEXTRACTF32x4rr for an integer extract; use VEXTRACTI32x4rr to match the
// v8i64 pattern below.)
359 def : Pat<(vextract128_extract:$ext (v16i32 VR512:$src1), (iPTR imm)),
360 (v4i32 (VEXTRACTI32x4rr VR512:$src1,
361 (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
363 def : Pat<(vextract128_extract:$ext (v8f64 VR512:$src1), (iPTR imm)),
364 (v2f64 (VEXTRACTF32x4rr VR512:$src1,
365 (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
367 def : Pat<(vextract128_extract:$ext (v8i64 VR512:$src1), (iPTR imm)),
368 (v2i64 (VEXTRACTI32x4rr VR512:$src1,
369 (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
// Select 256-bit subvector extracts via VEXTRACT{F,I}64x4.
372 def : Pat<(vextract256_extract:$ext (v16f32 VR512:$src1), (iPTR imm)),
373 (v8f32 (VEXTRACTF64x4rr VR512:$src1,
374 (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
376 def : Pat<(vextract256_extract:$ext (v16i32 VR512:$src1), (iPTR imm)),
377 (v8i32 (VEXTRACTI64x4rr VR512:$src1,
378 (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
380 def : Pat<(vextract256_extract:$ext (v8f64 VR512:$src1), (iPTR imm)),
381 (v4f64 (VEXTRACTF64x4rr VR512:$src1,
382 (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
384 def : Pat<(vextract256_extract:$ext (v8i64 VR512:$src1), (iPTR imm)),
385 (v4i64 (VEXTRACTI64x4rr VR512:$src1,
386 (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
388 // A 256-bit subvector extract from the first 512-bit vector position
389 // is a subregister copy that needs no instruction.
390 def : Pat<(v8i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
391 (v8i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm))>;
392 def : Pat<(v8f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
393 (v8f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm))>;
394 def : Pat<(v4i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
395 (v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm))>;
396 def : Pat<(v4f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
397 (v4f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm))>;
// Likewise a 128-bit extract from position 0 is a plain sub_xmm subregister
// copy. (The original comment at lines 398-399 is absent from this listing.)
400 def : Pat<(v4i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
401 (v4i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm))>;
402 def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
403 (v2i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm))>;
404 def : Pat<(v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
405 (v2f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;
406 def : Pat<(v4f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
407 (v4f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
410 // A 128-bit subvector insert to the first 512-bit vector position
411 // is a subregister copy that needs no instruction.
// Each pattern builds the 512-bit value by placing the XMM into a fresh
// 256-bit IMPLICIT_DEF, then that YMM into a fresh 512-bit IMPLICIT_DEF.
// NOTE(review): the final line of each of the next four patterns (the
// closing "sub_ymm)>;" at lines 415, 419, 423, 427) is absent from this
// listing.
412 def : Pat<(insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)),
413 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
414 (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
416 def : Pat<(insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0)),
417 (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
418 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
420 def : Pat<(insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0)),
421 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
422 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
424 def : Pat<(insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0)),
425 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
426 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
// 256-bit inserts at position 0 need only one INSERT_SUBREG step.
429 def : Pat<(insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0)),
430 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
431 def : Pat<(insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0)),
432 (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
433 def : Pat<(insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0)),
434 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
435 def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
436 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
438 // vextractps - extract 32 bits from XMM
// Register form extracts to a GR32; memory form folds the store.
// NOTE(review): lines 443-444 (trailing annotations of VEXTRACTPSzrr)
// are absent from this listing.
439 def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
440 (ins VR128X:$src1, u32u8imm:$src2),
441 "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
442 [(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
445 def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
446 (ins f32mem:$dst, VR128X:$src1, u32u8imm:$src2),
447 "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
448 [(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
449 addr:$dst)]>, EVEX, EVEX_CD8<32, CD8VT1>;
451 //===---------------------------------------------------------------------===//
// AVX-512 BROADCAST (section header reconstructed; original lines 452-453
// are absent from this listing, as are several trailing annotations and
// closing braces at lines 459, 462, 465, 467-468, 471, 473-474, 479, 484).
// Generates register and memory forms of a scalar-to-vector fp broadcast.
454 multiclass avx512_fp_broadcast<bits<8> opc, string OpcodeStr,
455 RegisterClass DestRC,
456 RegisterClass SrcRC, X86MemOperand x86memop> {
457 def rr : AVX5128I<opc, MRMSrcReg, (outs DestRC:$dst), (ins SrcRC:$src),
458 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
460 def rm : AVX5128I<opc, MRMSrcMem, (outs DestRC:$dst), (ins x86memop:$src),
461 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),[]>, EVEX;
463 let ExeDomain = SSEPackedSingle in {
464 defm VBROADCASTSSZ : avx512_fp_broadcast<0x18, "vbroadcastss", VR512,
466 EVEX_V512, EVEX_CD8<32, CD8VT1>;
469 let ExeDomain = SSEPackedDouble in {
470 defm VBROADCASTSDZ : avx512_fp_broadcast<0x19, "vbroadcastsd", VR512,
472 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
// Fold a scalar load feeding X86VBroadcast into the memory form.
475 def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))),
476 (VBROADCASTSSZrm addr:$src)>;
477 def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))),
478 (VBROADCASTSDZrm addr:$src)>;
// Map the broadcast intrinsics directly onto the memory form.
480 def : Pat<(int_x86_avx512_vbroadcast_ss_512 addr:$src),
481 (VBROADCASTSSZrm addr:$src)>;
482 def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
483 (VBROADCASTSDZrm addr:$src)>;
// GPR-source integer broadcasts: plain form (Zrr) and zero-masked form
// (Zkrr, EVEX_KZ). No ISel patterns here ([]); selection via the Pat<>s
// below. NOTE(review): lines 495-496, 499-500, 503, 506, 515, 520, 527
// (closing brace, VPBROADCASTQr trailing annotation, blank separators) are
// absent from this listing.
485 multiclass avx512_int_broadcast_reg<bits<8> opc, string OpcodeStr,
486 RegisterClass SrcRC, RegisterClass KRC> {
487 def Zrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins SrcRC:$src),
488 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
489 []>, EVEX, EVEX_V512;
490 def Zkrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst),
491 (ins KRC:$mask, SrcRC:$src),
492 !strconcat(OpcodeStr,
493 " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
494 []>, EVEX, EVEX_V512, EVEX_KZ;
497 defm VPBROADCASTDr : avx512_int_broadcast_reg<0x7C, "vpbroadcastd", GR32, VK16WM>;
498 defm VPBROADCASTQr : avx512_int_broadcast_reg<0x7C, "vpbroadcastq", GR64, VK8WM>,
// Zero-extend a mask to a vector: broadcast constant 1 under zero-masking,
// so selected lanes become 1 and the rest 0.
501 def : Pat <(v16i32 (X86vzext VK16WM:$mask)),
502 (VPBROADCASTDrZkrr VK16WM:$mask, (i32 (MOV32ri 0x1)))>;
504 def : Pat <(v8i64 (X86vzext VK8WM:$mask)),
505 (VPBROADCASTQrZkrr VK8WM:$mask, (i64 (MOV64ri 0x1)))>;
507 def : Pat<(v16i32 (X86VBroadcast (i32 GR32:$src))),
508 (VPBROADCASTDrZrr GR32:$src)>;
509 def : Pat<(v16i32 (X86VBroadcastm VK16WM:$mask, (i32 GR32:$src))),
510 (VPBROADCASTDrZkrr VK16WM:$mask, GR32:$src)>;
511 def : Pat<(v8i64 (X86VBroadcast (i64 GR64:$src))),
512 (VPBROADCASTQrZrr GR64:$src)>;
513 def : Pat<(v8i64 (X86VBroadcastm VK8WM:$mask, (i64 GR64:$src))),
514 (VPBROADCASTQrZkrr VK8WM:$mask, GR64:$src)>;
// Intrinsic forms.
516 def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_i32_512 (i32 GR32:$src))),
517 (VPBROADCASTDrZrr GR32:$src)>;
518 def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_i64_512 (i64 GR64:$src))),
519 (VPBROADCASTQrZrr GR64:$src)>;
// Masked-intrinsic forms with an all-zeros passthru map onto zero-masking;
// the GPR mask is first copied into the matching mask register class.
521 def : Pat<(v16i32 (int_x86_avx512_mask_pbroadcast_d_gpr_512 (i32 GR32:$src),
522 (v16i32 immAllZerosV), (i16 GR16:$mask))),
523 (VPBROADCASTDrZkrr (COPY_TO_REGCLASS GR16:$mask, VK16WM), GR32:$src)>;
524 def : Pat<(v8i64 (int_x86_avx512_mask_pbroadcast_q_gpr_512 (i64 GR64:$src),
525 (bc_v8i64 (v16i32 immAllZerosV)), (i8 GR8:$mask))),
526 (VPBROADCASTQrZkrr (COPY_TO_REGCLASS GR8:$mask, VK8WM), GR64:$src)>;
// XMM- or memory-source integer broadcasts: plain (rr/rm) and zero-masked
// (krr/krm, EVEX_KZ) forms, with X86VBroadcast/X86VBroadcastm patterns
// attached. NOTE(review): lines 531, 534, 537, 540, 542-543, 546, 549,
// 554-556 (the KRC parameter, parts of the ins dags/pattern lines, trailing
// annotations and the closing brace) are absent from this listing.
528 multiclass avx512_int_broadcast_rm<bits<8> opc, string OpcodeStr,
529 X86MemOperand x86memop, PatFrag ld_frag,
530 RegisterClass DstRC, ValueType OpVT, ValueType SrcVT,
532 def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins VR128X:$src),
533 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
535 (OpVT (X86VBroadcast (SrcVT VR128X:$src))))]>, EVEX;
536 def krr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins KRC:$mask,
538 !strconcat(OpcodeStr,
539 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
541 (OpVT (X86VBroadcastm KRC:$mask, (SrcVT VR128X:$src))))]>,
544 def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
545 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
547 (OpVT (X86VBroadcast (ld_frag addr:$src))))]>, EVEX;
548 def krm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins KRC:$mask,
550 !strconcat(OpcodeStr,
551 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
552 [(set DstRC:$dst, (OpVT (X86VBroadcastm KRC:$mask,
553 (ld_frag addr:$src))))]>, EVEX, EVEX_KZ;
557 defm VPBROADCASTDZ : avx512_int_broadcast_rm<0x58, "vpbroadcastd", i32mem,
558 loadi32, VR512, v16i32, v4i32, VK16WM>,
559 EVEX_V512, EVEX_CD8<32, CD8VT1>;
560 defm VPBROADCASTQZ : avx512_int_broadcast_rm<0x59, "vpbroadcastq", i64mem,
561 loadi64, VR512, v8i64, v2i64, VK8WM>, EVEX_V512, VEX_W,
562 EVEX_CD8<64, CD8VT1>;
// Subvector broadcasts from memory: plain (rm) and zero-masked (krm) forms.
// NOTE(review): lines 566-567, 570, 572, 575-578 (KRC parameter/closing of
// the parameter list, trailing annotations, the krm pattern tail and the
// closing brace) are absent from this listing.
564 multiclass avx512_int_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
565 X86MemOperand x86memop, PatFrag ld_frag,
568 def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins x86memop:$src),
569 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
571 def krm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins KRC:$mask,
573 !strconcat(OpcodeStr,
574 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
579 defm VBROADCASTI32X4 : avx512_int_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
580 i128mem, loadv2i64, VK16WM>,
581 EVEX_V512, EVEX_CD8<32, CD8VT4>;
582 defm VBROADCASTI64X4 : avx512_int_subvec_broadcast_rm<0x5b, "vbroadcasti64x4",
// (fix: the 64x4 variant produces a v8i64 — 8 elements — so its write-mask
// class is the 8-bit VK8WM, matching VPBROADCASTQ above; it was mistakenly
// instantiated with the 16-bit VK16WM.)
583 i256mem, loadv4i64, VK8WM>, VEX_W,
584 EVEX_V512, EVEX_CD8<64, CD8VT4>;
// Intrinsic forms of the XMM-source broadcasts.
586 def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_512 (v4i32 VR128X:$src))),
587 (VPBROADCASTDZrr VR128X:$src)>;
588 def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_512 (v2i64 VR128X:$src))),
589 (VPBROADCASTQZrr VR128X:$src)>;
591 def : Pat<(v16f32 (X86VBroadcast (v4f32 VR128X:$src))),
592 (VBROADCASTSSZrr VR128X:$src)>;
593 def : Pat<(v8f64 (X86VBroadcast (v2f64 VR128X:$src))),
594 (VBROADCASTSDZrr VR128X:$src)>;
596 def : Pat<(v16f32 (int_x86_avx512_vbroadcast_ss_ps_512 (v4f32 VR128X:$src))),
597 (VBROADCASTSSZrr VR128X:$src)>;
598 def : Pat<(v8f64 (int_x86_avx512_vbroadcast_sd_pd_512 (v2f64 VR128X:$src))),
599 (VBROADCASTSDZrr VR128X:$src)>;
601 // Provide fallback in case the load node that is used in the patterns above
602 // is used by additional users, which prevents the pattern selection.
// The scalar FR register is moved into VR128X first via COPY_TO_REGCLASS.
603 def : Pat<(v16f32 (X86VBroadcast FR32X:$src)),
604 (VBROADCASTSSZrr (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
605 def : Pat<(v8f64 (X86VBroadcast FR64X:$src)),
606 (VBROADCASTSDZrr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
// Masked i32 broadcast with an 8-bit mask: widen the mask to VK16WM, do a
// full v16i32 masked broadcast and take the low YMM.
// NOTE(review): lines 611 and 614 (part of this pattern and the closing
// brace of the let block) are absent from this listing.
609 let Predicates = [HasAVX512] in {
610 def : Pat<(v8i32 (X86VBroadcastm (v8i1 VK8WM:$mask), (loadi32 addr:$src))),
612 (v16i32 (VPBROADCASTDZkrm (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
613 addr:$src)), sub_ymm)>;
615 //===----------------------------------------------------------------------===//
616 // AVX-512 BROADCAST MASK TO VECTOR REGISTER
// VPBROADCASTM{W2D,B2Q}: broadcast a mask register into vector lanes.
// NOTE(review): lines 624-626 (pattern tail and closing brace) and 632-633
// are absent from this listing.
619 multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
620 RegisterClass DstRC, RegisterClass KRC,
621 ValueType OpVT, ValueType SrcVT> {
622 def rr : AVX512XS8I<opc, MRMDestReg, (outs DstRC:$dst), (ins KRC:$src),
623 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
627 let Predicates = [HasCDI] in {
628 defm VPBROADCASTMW2D : avx512_mask_broadcast<0x3A, "vpbroadcastmw2d", VR512,
629 VK16, v16i32, v16i1>, EVEX_V512;
630 defm VPBROADCASTMB2Q : avx512_mask_broadcast<0x2A, "vpbroadcastmb2q", VR512,
631 VK8, v8i64, v8i1>, EVEX_V512, VEX_W;
634 //===----------------------------------------------------------------------===//
// AVX-512 - VPERM (section header reconstructed; original lines 635-636 are
// absent from this listing, as are lines 645, 647, 652, 655-656, 662, 666,
// 671, 673, 678, 680-682 — pattern-set openers, trailing annotations and
// closing braces).
637 // -- immediate form --
// Cross-lane permute with an 8-bit immediate control (VPERMQ/VPERMPD style).
638 multiclass avx512_perm_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
639 SDNode OpNode, PatFrag mem_frag,
640 X86MemOperand x86memop, ValueType OpVT> {
641 def ri : AVX512AIi8<opc, MRMSrcReg, (outs RC:$dst),
642 (ins RC:$src1, i8imm:$src2),
643 !strconcat(OpcodeStr,
644 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
646 (OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
648 def mi : AVX512AIi8<opc, MRMSrcMem, (outs RC:$dst),
649 (ins x86memop:$src1, i8imm:$src2),
650 !strconcat(OpcodeStr,
651 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
653 (OpVT (OpNode (mem_frag addr:$src1),
654 (i8 imm:$src2))))]>, EVEX;
657 defm VPERMQZ : avx512_perm_imm<0x00, "vpermq", VR512, X86VPermi, memopv8i64,
658 i512mem, v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
659 let ExeDomain = SSEPackedDouble in
660 defm VPERMPDZ : avx512_perm_imm<0x01, "vpermpd", VR512, X86VPermi, memopv8f64,
661 f512mem, v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
663 // -- VPERM - register form --
// Cross-lane permute with a vector index operand (X86VPermv).
664 multiclass avx512_perm<bits<8> opc, string OpcodeStr, RegisterClass RC,
665 PatFrag mem_frag, X86MemOperand x86memop, ValueType OpVT> {
667 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
668 (ins RC:$src1, RC:$src2),
669 !strconcat(OpcodeStr,
670 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
672 (OpVT (X86VPermv RC:$src1, RC:$src2)))]>, EVEX_4V;
674 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
675 (ins RC:$src1, x86memop:$src2),
676 !strconcat(OpcodeStr,
677 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
679 (OpVT (X86VPermv RC:$src1, (mem_frag addr:$src2))))]>,
// Note: "VPERMQZ"/"VPERMPDZ" defm prefixes recur from the imm-form defms
// above; the generated record names differ (ri/mi vs rr/rm) so they do not
// collide.
683 defm VPERMDZ : avx512_perm<0x36, "vpermd", VR512, memopv16i32, i512mem,
684 v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
685 defm VPERMQZ : avx512_perm<0x36, "vpermq", VR512, memopv8i64, i512mem,
686 v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
687 let ExeDomain = SSEPackedSingle in
688 defm VPERMPSZ : avx512_perm<0x16, "vpermps", VR512, memopv16f32, f512mem,
689 v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
690 let ExeDomain = SSEPackedDouble in
691 defm VPERMPDZ : avx512_perm<0x16, "vpermpd", VR512, memopv8f64, f512mem,
692 v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
694 // -- VPERM2I - 3 source operands form --
695 multiclass avx512_perm_3src<bits<8> opc, string OpcodeStr, RegisterClass RC,
696 PatFrag mem_frag, X86MemOperand x86memop,
697 SDNode OpNode, ValueType OpVT, RegisterClass KRC> {
698 let Constraints = "$src1 = $dst" in {
699 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
700 (ins RC:$src1, RC:$src2, RC:$src3),
701 !strconcat(OpcodeStr,
702 " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
704 (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>,
707 def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
708 (ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
709 !strconcat(OpcodeStr,
710 " \t{$src3, $src2, $dst {${mask}}|"
711 "$dst {${mask}}, $src2, $src3}"),
712 [(set RC:$dst, (OpVT (vselect KRC:$mask,
713 (OpNode RC:$src1, RC:$src2,
718 let AddedComplexity = 30 in // Prefer over VMOV*rrkz Pat<>
719 def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
720 (ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
721 !strconcat(OpcodeStr,
722 " \t{$src3, $src2, $dst {${mask}} {z} |",
723 "$dst {${mask}} {z}, $src2, $src3}"),
724 [(set RC:$dst, (OpVT (vselect KRC:$mask,
725 (OpNode RC:$src1, RC:$src2,
728 (v16i32 immAllZerosV))))))]>,
731 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
732 (ins RC:$src1, RC:$src2, x86memop:$src3),
733 !strconcat(OpcodeStr,
734 " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
736 (OpVT (OpNode RC:$src1, RC:$src2,
737 (mem_frag addr:$src3))))]>, EVEX_4V;
739 def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
740 (ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
741 !strconcat(OpcodeStr,
742 " \t{$src3, $src2, $dst {${mask}}|"
743 "$dst {${mask}}, $src2, $src3}"),
745 (OpVT (vselect KRC:$mask,
746 (OpNode RC:$src1, RC:$src2,
747 (mem_frag addr:$src3)),
751 let AddedComplexity = 10 in // Prefer over the rrkz variant
752 def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
753 (ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
754 !strconcat(OpcodeStr,
755 " \t{$src3, $src2, $dst {${mask}} {z}|"
756 "$dst {${mask}} {z}, $src2, $src3}"),
758 (OpVT (vselect KRC:$mask,
759 (OpNode RC:$src1, RC:$src2,
760 (mem_frag addr:$src3)),
762 (v16i32 immAllZerosV))))))]>,
// VPERMI2{D,Q}: 3-source, index-destructive permutes. avx512_perm_3src ties
// $src1 (the index vector) to $dst and also emits the rrk/rrkz/rm* masked and
// zero-masked variants.
766 defm VPERMI2D : avx512_perm_3src<0x76, "vpermi2d", VR512, memopv16i32,
767 i512mem, X86VPermiv3, v16i32, VK16WM>,
768 EVEX_V512, EVEX_CD8<32, CD8VF>;
769 defm VPERMI2Q : avx512_perm_3src<0x76, "vpermi2q", VR512, memopv8i64,
770 i512mem, X86VPermiv3, v8i64, VK8WM>,
771 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// VPERMI2{PS,PD}: FP flavors of the 3-source index-destructive permute.
// Use the FP 512-bit memory operand (f512mem) for consistency with the
// register-form FP permutes VPERMPSZ/VPERMPDZ above; these previously used
// i512mem, which is the integer-flavored operand of the same size.
772 defm VPERMI2PS : avx512_perm_3src<0x77, "vpermi2ps", VR512, memopv16f32,
773 f512mem, X86VPermiv3, v16f32, VK16WM>,
774 EVEX_V512, EVEX_CD8<32, CD8VF>;
775 defm VPERMI2PD : avx512_perm_3src<0x77, "vpermi2pd", VR512, memopv8f64,
776 f512mem, X86VPermiv3, v8f64, VK8WM>,
777 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
779 multiclass avx512_perm_table_3src<bits<8> opc, string Suffix, RegisterClass RC,
780 PatFrag mem_frag, X86MemOperand x86memop,
781 SDNode OpNode, ValueType OpVT, RegisterClass KRC,
782 ValueType MaskVT, RegisterClass MRC> :
783 avx512_perm_3src<opc, "vpermt2"##Suffix, RC, mem_frag, x86memop, OpNode,
785 def : Pat<(OpVT (!cast<Intrinsic>("int_x86_avx512_mask_vpermt_"##Suffix##"_512")
786 VR512:$idx, VR512:$src1, VR512:$src2, -1)),
787 (!cast<Instruction>(NAME#rr) VR512:$src1, VR512:$idx, VR512:$src2)>;
789 def : Pat<(OpVT (!cast<Intrinsic>("int_x86_avx512_mask_vpermt_"##Suffix##"_512")
790 VR512:$idx, VR512:$src1, VR512:$src2, MRC:$mask)),
791 (!cast<Instruction>(NAME#rrk) VR512:$src1,
792 (MaskVT (COPY_TO_REGCLASS MRC:$mask, KRC)), VR512:$idx, VR512:$src2)>;
// VPERMT2{D,Q}: 3-source, table-destructive permutes ($src1 holds the table
// and is tied to $dst). avx512_perm_table_3src additionally instantiates the
// int_x86_avx512_mask_vpermt_* intrinsic selection patterns; the trailing
// MaskVT/MRC parameters (v16i1/GR16, v8i1/GR8) describe the GPR-carried mask
// those intrinsics take.
795 defm VPERMT2D : avx512_perm_table_3src<0x7E, "d", VR512, memopv16i32, i512mem,
796 X86VPermv3, v16i32, VK16WM, v16i1, GR16>,
797 EVEX_V512, EVEX_CD8<32, CD8VF>;
798 defm VPERMT2Q : avx512_perm_table_3src<0x7E, "q", VR512, memopv8i64, i512mem,
799 X86VPermv3, v8i64, VK8WM, v8i1, GR8>,
800 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// VPERMT2{PS,PD}: FP flavors of the table-destructive permute. As with
// VPERMI2PS/PD, use the FP 512-bit memory operand (f512mem) to match the
// register-form FP permutes VPERMPSZ/VPERMPDZ; previously i512mem was used.
801 defm VPERMT2PS : avx512_perm_table_3src<0x7F, "ps", VR512, memopv16f32, f512mem,
802 X86VPermv3, v16f32, VK16WM, v16i1, GR16>,
803 EVEX_V512, EVEX_CD8<32, CD8VF>;
804 defm VPERMT2PD : avx512_perm_table_3src<0x7F, "pd", VR512, memopv8f64, f512mem,
805 X86VPermv3, v8f64, VK8WM, v8i1, GR8>,
806 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
808 //===----------------------------------------------------------------------===//
809 // AVX-512 - BLEND using mask
811 multiclass avx512_blendmask<bits<8> opc, string OpcodeStr,
812 RegisterClass KRC, RegisterClass RC,
813 X86MemOperand x86memop, PatFrag mem_frag,
814 SDNode OpNode, ValueType vt> {
815 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
816 (ins KRC:$mask, RC:$src1, RC:$src2),
817 !strconcat(OpcodeStr,
818 " \t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
819 [(set RC:$dst, (OpNode KRC:$mask, (vt RC:$src2),
820 (vt RC:$src1)))]>, EVEX_4V, EVEX_K;
822 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
823 (ins KRC:$mask, RC:$src1, x86memop:$src2),
824 !strconcat(OpcodeStr,
825 " \t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
826 []>, EVEX_4V, EVEX_K;
// Masked FP blends: dst = mask ? src2 : src1, selected per element by a
// write-mask register (VK16WM/VK8WM). ExeDomain pins each to its FP domain.
829 let ExeDomain = SSEPackedSingle in
830 defm VBLENDMPSZ : avx512_blendmask<0x65, "vblendmps",
831 VK16WM, VR512, f512mem,
832 memopv16f32, vselect, v16f32>,
833 EVEX_CD8<32, CD8VF>, EVEX_V512;
834 let ExeDomain = SSEPackedDouble in
835 defm VBLENDMPDZ : avx512_blendmask<0x65, "vblendmpd",
836 VK8WM, VR512, f512mem,
837 memopv8f64, vselect, v8f64>,
838 VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
// Select the FP masked-blend intrinsics to the rr blend instructions.
// The intrinsic carries the mask in a GPR; it is moved into the write-mask
// register class (VK16WM/VK8WM) required by the instruction's $mask operand.
840 def : Pat<(v16f32 (int_x86_avx512_mask_blend_ps_512 (v16f32 VR512:$src1),
841 (v16f32 VR512:$src2), (i16 GR16:$mask))),
842 (VBLENDMPSZrr (COPY_TO_REGCLASS GR16:$mask, VK16WM),
843 VR512:$src1, VR512:$src2)>;
845 def : Pat<(v8f64 (int_x86_avx512_mask_blend_pd_512 (v8f64 VR512:$src1),
846 (v8f64 VR512:$src2), (i8 GR8:$mask))),
847 (VBLENDMPDZrr (COPY_TO_REGCLASS GR8:$mask, VK8WM),
848 VR512:$src1, VR512:$src2)>;
// Masked integer blends (dword/qword), same multiclass as the FP blends.
// NOTE(review): these reuse f512mem for the memory form; the size is the
// same as i512mem, but an integer-flavored operand would better match the
// mnemonic — confirm whether this is intentional.
850 defm VPBLENDMDZ : avx512_blendmask<0x64, "vpblendmd",
851 VK16WM, VR512, f512mem,
852 memopv16i32, vselect, v16i32>,
853 EVEX_CD8<32, CD8VF>, EVEX_V512;
855 defm VPBLENDMQZ : avx512_blendmask<0x64, "vpblendmq",
856 VK8WM, VR512, f512mem,
857 memopv8i64, vselect, v8i64>,
858 VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
// Select the dword masked-blend intrinsic to VPBLENDMDZrr. The GPR mask must
// be copied into the write-mask class VK16WM — not plain VK16 — because the
// instruction's $mask operand is VK16WM (see the avx512_blendmask
// instantiation above and the matching FP pattern, which already uses VK16WM).
860 def : Pat<(v16i32 (int_x86_avx512_mask_blend_d_512 (v16i32 VR512:$src1),
861 (v16i32 VR512:$src2), (i16 GR16:$mask))),
862 (VPBLENDMDZrr (COPY_TO_REGCLASS GR16:$mask, VK16WM),
863 VR512:$src1, VR512:$src2)>;
// Select the qword masked-blend intrinsic to VPBLENDMQZrr. As with the dword
// form, the GPR mask must land in the write-mask class VK8WM (not plain VK8)
// to satisfy the instruction's $mask operand class.
865 def : Pat<(v8i64 (int_x86_avx512_mask_blend_q_512 (v8i64 VR512:$src1),
866 (v8i64 VR512:$src2), (i8 GR8:$mask))),
867 (VPBLENDMQZrr (COPY_TO_REGCLASS GR8:$mask, VK8WM),
868 VR512:$src1, VR512:$src2)>;
870 let Predicates = [HasAVX512] in {
871 def : Pat<(v8f32 (vselect (v8i1 VK8WM:$mask), (v8f32 VR256X:$src1),
872 (v8f32 VR256X:$src2))),
874 (v16f32 (VBLENDMPSZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
875 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
876 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
878 def : Pat<(v8i32 (vselect (v8i1 VK8WM:$mask), (v8i32 VR256X:$src1),
879 (v8i32 VR256X:$src2))),
881 (v16i32 (VPBLENDMDZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
882 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
883 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
885 //===----------------------------------------------------------------------===//
886 // Compare Instructions
887 //===----------------------------------------------------------------------===//
889 // avx512_cmp_scalar - AVX512 CMPSS and CMPSD
890 multiclass avx512_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
891 Operand CC, SDNode OpNode, ValueType VT,
892 PatFrag ld_frag, string asm, string asm_alt> {
893 def rr : AVX512Ii8<0xC2, MRMSrcReg,
894 (outs VK1:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
895 [(set VK1:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))],
896 IIC_SSE_ALU_F32S_RR>, EVEX_4V;
897 def rm : AVX512Ii8<0xC2, MRMSrcMem,
898 (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
899 [(set VK1:$dst, (OpNode (VT RC:$src1),
900 (ld_frag addr:$src2), imm:$cc))], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
901 let isAsmParserOnly = 1, hasSideEffects = 0 in {
902 def rri_alt : AVX512Ii8<0xC2, MRMSrcReg,
903 (outs VK1:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
904 asm_alt, [], IIC_SSE_ALU_F32S_RR>, EVEX_4V;
905 def rmi_alt : AVX512Ii8<0xC2, MRMSrcMem,
906 (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
907 asm_alt, [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
911 let Predicates = [HasAVX512] in {
912 defm VCMPSSZ : avx512_cmp_scalar<FR32X, f32mem, AVXCC, X86cmpms, f32, loadf32,
913 "vcmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
914 "vcmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
916 defm VCMPSDZ : avx512_cmp_scalar<FR64X, f64mem, AVXCC, X86cmpms, f64, loadf64,
917 "vcmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
918 "vcmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
922 multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, RegisterClass KRC,
923 RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
924 SDNode OpNode, ValueType vt> {
925 def rr : AVX512BI<opc, MRMSrcReg,
926 (outs KRC:$dst), (ins RC:$src1, RC:$src2),
927 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
928 [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2)))],
929 IIC_SSE_ALU_F32P_RR>, EVEX_4V;
930 def rm : AVX512BI<opc, MRMSrcMem,
931 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2),
932 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
933 [(set KRC:$dst, (OpNode (vt RC:$src1), (memop_frag addr:$src2)))],
934 IIC_SSE_ALU_F32P_RM>, EVEX_4V;
937 defm VPCMPEQDZ : avx512_icmp_packed<0x76, "vpcmpeqd", VK16, VR512, i512mem,
938 memopv16i32, X86pcmpeqm, v16i32>, EVEX_V512,
940 defm VPCMPEQQZ : avx512_icmp_packed<0x29, "vpcmpeqq", VK8, VR512, i512mem,
941 memopv8i64, X86pcmpeqm, v8i64>, T8PD, EVEX_V512,
942 VEX_W, EVEX_CD8<64, CD8VF>;
944 defm VPCMPGTDZ : avx512_icmp_packed<0x66, "vpcmpgtd", VK16, VR512, i512mem,
945 memopv16i32, X86pcmpgtm, v16i32>, EVEX_V512,
947 defm VPCMPGTQZ : avx512_icmp_packed<0x37, "vpcmpgtq", VK8, VR512, i512mem,
948 memopv8i64, X86pcmpgtm, v8i64>, T8PD, EVEX_V512,
949 VEX_W, EVEX_CD8<64, CD8VF>;
// 256-bit integer mask-compares have no dedicated instruction here: widen the
// YMM operands into the low half of a ZMM (SUBREG_TO_REG with sub_ymm), run
// the 512-bit compare, then reinterpret the low 8 bits of the VK16 result as
// VK8. The upper-lane results are simply ignored.
951 def : Pat<(v8i1 (X86pcmpgtm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
952 (COPY_TO_REGCLASS (VPCMPGTDZrr
953 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
954 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
956 def : Pat<(v8i1 (X86pcmpeqm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
957 (COPY_TO_REGCLASS (VPCMPEQDZrr
958 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
959 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
961 multiclass avx512_icmp_cc<bits<8> opc, RegisterClass WMRC, RegisterClass KRC,
962 RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
963 SDNode OpNode, ValueType vt, Operand CC, string Suffix> {
964 def rri : AVX512AIi8<opc, MRMSrcReg,
965 (outs KRC:$dst), (ins RC:$src1, RC:$src2, CC:$cc),
966 !strconcat("vpcmp${cc}", Suffix,
967 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
968 [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2), imm:$cc))],
969 IIC_SSE_ALU_F32P_RR>, EVEX_4V;
970 def rmi : AVX512AIi8<opc, MRMSrcMem,
971 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc),
972 !strconcat("vpcmp${cc}", Suffix,
973 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
974 [(set KRC:$dst, (OpNode (vt RC:$src1), (memop_frag addr:$src2),
975 imm:$cc))], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
976 // Accept explicit immediate argument form instead of comparison code.
977 let isAsmParserOnly = 1, hasSideEffects = 0 in {
978 def rri_alt : AVX512AIi8<opc, MRMSrcReg,
979 (outs KRC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
980 !strconcat("vpcmp", Suffix,
981 "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
982 [], IIC_SSE_ALU_F32P_RR>, EVEX_4V;
983 def rrik_alt : AVX512AIi8<opc, MRMSrcReg,
984 (outs KRC:$dst), (ins WMRC:$mask, RC:$src1, RC:$src2, i8imm:$cc),
985 !strconcat("vpcmp", Suffix,
986 "\t{$cc, $src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2, $cc}"),
987 [], IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
988 def rmi_alt : AVX512AIi8<opc, MRMSrcMem,
989 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
990 !strconcat("vpcmp", Suffix,
991 "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
992 [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
993 def rmik_alt : AVX512AIi8<opc, MRMSrcMem,
994 (outs KRC:$dst), (ins WMRC:$mask, RC:$src1, x86memop:$src2, i8imm:$cc),
995 !strconcat("vpcmp", Suffix,
996 "\t{$cc, $src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2, $cc}"),
997 [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
// Integer compares with an explicit condition code: signed (X86cmpm) and
// unsigned (X86cmpmu) variants for dword and qword elements, producing a
// mask register result. WMRC (VK16WM/VK8WM) is used by the masked *_alt
// assembler forms inside avx512_icmp_cc.
1001 defm VPCMPDZ : avx512_icmp_cc<0x1F, VK16WM, VK16, VR512, i512mem, memopv16i32,
1002 X86cmpm, v16i32, AVXCC, "d">,
1003 EVEX_V512, EVEX_CD8<32, CD8VF>;
1004 defm VPCMPUDZ : avx512_icmp_cc<0x1E, VK16WM, VK16, VR512, i512mem, memopv16i32,
1005 X86cmpmu, v16i32, AVXCC, "ud">,
1006 EVEX_V512, EVEX_CD8<32, CD8VF>;
1008 defm VPCMPQZ : avx512_icmp_cc<0x1F, VK8WM, VK8, VR512, i512mem, memopv8i64,
1009 X86cmpm, v8i64, AVXCC, "q">,
1010 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
1011 defm VPCMPUQZ : avx512_icmp_cc<0x1E, VK8WM, VK8, VR512, i512mem, memopv8i64,
1012 X86cmpmu, v8i64, AVXCC, "uq">,
1013 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
1015 // avx512_cmp_packed - compare packed instructions
1016 multiclass avx512_cmp_packed<RegisterClass KRC, RegisterClass RC,
1017 X86MemOperand x86memop, ValueType vt,
1018 string suffix, Domain d> {
1019 def rri : AVX512PIi8<0xC2, MRMSrcReg,
1020 (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
1021 !strconcat("vcmp${cc}", suffix,
1022 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1023 [(set KRC:$dst, (X86cmpm (vt RC:$src1), (vt RC:$src2), imm:$cc))], d>;
1024 def rrib: AVX512PIi8<0xC2, MRMSrcReg,
1025 (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
1026 !strconcat("vcmp${cc}", suffix,
1027 " \t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
1029 def rmi : AVX512PIi8<0xC2, MRMSrcMem,
1030 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, AVXCC:$cc),
1031 !strconcat("vcmp${cc}", suffix,
1032 " \t{$src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
1034 (X86cmpm (vt RC:$src1), (memop addr:$src2), imm:$cc))], d>;
1036 // Accept explicit immediate argument form instead of comparison code.
1037 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1038 def rri_alt : AVX512PIi8<0xC2, MRMSrcReg,
1039 (outs KRC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
1040 !strconcat("vcmp", suffix,
1041 " \t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
1042 def rmi_alt : AVX512PIi8<0xC2, MRMSrcMem,
1043 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
1044 !strconcat("vcmp", suffix,
1045 " \t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
// Packed FP compares producing a mask register; avx512_cmp_packed supplies
// rri/rrib (static-rounding/SAE)/rmi forms plus explicit-immediate aliases.
1049 defm VCMPPSZ : avx512_cmp_packed<VK16, VR512, f512mem, v16f32,
1050 "ps", SSEPackedSingle>, PS, EVEX_4V, EVEX_V512,
1051 EVEX_CD8<32, CD8VF>;
1052 defm VCMPPDZ : avx512_cmp_packed<VK8, VR512, f512mem, v8f64,
1053 "pd", SSEPackedDouble>, PD, EVEX_4V, VEX_W, EVEX_V512,
1054 EVEX_CD8<64, CD8VF>;
1056 def : Pat<(v8i1 (X86cmpm (v8f32 VR256X:$src1), (v8f32 VR256X:$src2), imm:$cc)),
1057 (COPY_TO_REGCLASS (VCMPPSZrri
1058 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
1059 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
1061 def : Pat<(v8i1 (X86cmpm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
1062 (COPY_TO_REGCLASS (VPCMPDZrri
1063 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
1064 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
1066 def : Pat<(v8i1 (X86cmpmu (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
1067 (COPY_TO_REGCLASS (VPCMPUDZrri
1068 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
1069 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
1072 def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
1073 (v16f32 VR512:$src2), imm:$cc, (i16 -1),
1075 (COPY_TO_REGCLASS (VCMPPSZrrib VR512:$src1, VR512:$src2,
1076 (I8Imm imm:$cc)), GR16)>;
1078 def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
1079 (v8f64 VR512:$src2), imm:$cc, (i8 -1),
1081 (COPY_TO_REGCLASS (VCMPPDZrrib VR512:$src1, VR512:$src2,
1082 (I8Imm imm:$cc)), GR8)>;
1084 def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
1085 (v16f32 VR512:$src2), imm:$cc, (i16 -1),
1087 (COPY_TO_REGCLASS (VCMPPSZrri VR512:$src1, VR512:$src2,
1088 (I8Imm imm:$cc)), GR16)>;
1090 def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
1091 (v8f64 VR512:$src2), imm:$cc, (i8 -1),
1093 (COPY_TO_REGCLASS (VCMPPDZrri VR512:$src1, VR512:$src2,
1094 (I8Imm imm:$cc)), GR8)>;
1096 // Mask register copy, including
1097 // - copy between mask registers
1098 // - load/store mask registers
1099 // - copy from GPR to mask register and vice versa
1101 multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
1102 string OpcodeStr, RegisterClass KRC,
1103 ValueType vvt, ValueType ivt, X86MemOperand x86memop> {
1104 let hasSideEffects = 0 in {
1105 def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
1106 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
1108 def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
1109 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
1110 [(set KRC:$dst, (vvt (bitconvert (ivt (load addr:$src)))))]>;
1112 def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
1113 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
1117 multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
1119 RegisterClass KRC, RegisterClass GRC> {
1120 let hasSideEffects = 0 in {
1121 def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src),
1122 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
1123 def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src),
1124 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
1128 let Predicates = [HasDQI] in
1129 defm KMOVB : avx512_mask_mov<0x90, 0x90, 0x91, "kmovb", VK8, v8i1, i8,
1131 avx512_mask_mov_gpr<0x92, 0x93, "kmovb", VK8, GR32>,
1134 let Predicates = [HasAVX512] in
1135 defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16,
1137 avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
1140 let Predicates = [HasBWI] in {
1141 defm KMOVD : avx512_mask_mov<0x90, 0x90, 0x91, "kmovd", VK32, v32i1, i32,
1142 i32mem>, VEX, PD, VEX_W;
1143 defm KMOVD : avx512_mask_mov_gpr<0x92, 0x93, "kmovd", VK32, GR32>,
1147 let Predicates = [HasBWI] in {
1148 defm KMOVQ : avx512_mask_mov<0x90, 0x90, 0x91, "kmovq", VK64, v64i1, i64,
1149 i64mem>, VEX, PS, VEX_W;
1150 defm KMOVQ : avx512_mask_mov_gpr<0x92, 0x93, "kmovq", VK64, GR64>,
1154 // GR from/to mask register
1155 let Predicates = [HasDQI] in {
1156 def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
1157 (KMOVBkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit))>;
1158 def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
1159 (EXTRACT_SUBREG (KMOVBrk VK8:$src), sub_8bit)>;
1161 let Predicates = [HasAVX512] in {
1162 def : Pat<(v16i1 (bitconvert (i16 GR16:$src))),
1163 (KMOVWkr (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit))>;
1164 def : Pat<(i16 (bitconvert (v16i1 VK16:$src))),
1165 (EXTRACT_SUBREG (KMOVWrk VK16:$src), sub_16bit)>;
1167 let Predicates = [HasBWI] in {
1168 def : Pat<(v32i1 (bitconvert (i32 GR32:$src))), (KMOVDkr GR32:$src)>;
1169 def : Pat<(i32 (bitconvert (v32i1 VK32:$src))), (KMOVDrk VK32:$src)>;
1171 let Predicates = [HasBWI] in {
1172 def : Pat<(v64i1 (bitconvert (i64 GR64:$src))), (KMOVQkr GR64:$src)>;
1173 def : Pat<(i64 (bitconvert (v64i1 VK64:$src))), (KMOVQrk VK64:$src)>;
1177 let Predicates = [HasDQI] in {
1178 def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst),
1179 (KMOVBmk addr:$dst, VK8:$src)>;
1181 let Predicates = [HasAVX512] in {
1182 def : Pat<(store (i16 (bitconvert (v16i1 VK16:$src))), addr:$dst),
1183 (KMOVWmk addr:$dst, VK16:$src)>;
1184 def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst),
1185 (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK8:$src, VK16))>;
1186 def : Pat<(i1 (load addr:$src)),
1187 (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK1)>;
1188 def : Pat<(v8i1 (bitconvert (i8 (load addr:$src)))),
1189 (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK8)>;
1191 let Predicates = [HasBWI] in {
1192 def : Pat<(store (i32 (bitconvert (v32i1 VK32:$src))), addr:$dst),
1193 (KMOVDmk addr:$dst, VK32:$src)>;
1195 let Predicates = [HasBWI] in {
1196 def : Pat<(store (i64 (bitconvert (v64i1 VK64:$src))), addr:$dst),
1197 (KMOVQmk addr:$dst, VK64:$src)>;
1200 let Predicates = [HasAVX512] in {
1201 def : Pat<(i1 (trunc (i32 GR32:$src))),
1202 (COPY_TO_REGCLASS (KMOVWkr (AND32ri $src, (i32 1))), VK1)>;
1204 def : Pat<(i1 (trunc (i8 GR8:$src))),
1206 (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit), (i32 1))),
1208 def : Pat<(i1 (trunc (i16 GR16:$src))),
1210 (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), $src, sub_16bit), (i32 1))),
1213 def : Pat<(i32 (zext VK1:$src)),
1214 (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1))>;
1215 def : Pat<(i8 (zext VK1:$src)),
1218 (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)), sub_8bit)>;
1219 def : Pat<(i64 (zext VK1:$src)),
1220 (AND64ri8 (SUBREG_TO_REG (i64 0),
1221 (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), sub_32bit), (i64 1))>;
1222 def : Pat<(i16 (zext VK1:$src)),
1224 (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)),
1226 def : Pat<(v16i1 (scalar_to_vector VK1:$src)),
1227 (COPY_TO_REGCLASS VK1:$src, VK16)>;
1228 def : Pat<(v8i1 (scalar_to_vector VK1:$src)),
1229 (COPY_TO_REGCLASS VK1:$src, VK8)>;
1231 let Predicates = [HasBWI] in {
1232 def : Pat<(v32i1 (scalar_to_vector VK1:$src)),
1233 (COPY_TO_REGCLASS VK1:$src, VK32)>;
1234 def : Pat<(v64i1 (scalar_to_vector VK1:$src)),
1235 (COPY_TO_REGCLASS VK1:$src, VK64)>;
1239 // With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
1240 let Predicates = [HasAVX512] in {
1241 // GR from/to 8-bit mask without native support
1242 def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
1244 (KMOVWkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
1246 def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
1248 (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
1251 def : Pat<(i1 (X86Vextract VK16:$src, (iPTR 0))),
1252 (COPY_TO_REGCLASS VK16:$src, VK1)>;
1253 def : Pat<(i1 (X86Vextract VK8:$src, (iPTR 0))),
1254 (COPY_TO_REGCLASS VK8:$src, VK1)>;
1256 let Predicates = [HasBWI] in {
1257 def : Pat<(i1 (X86Vextract VK32:$src, (iPTR 0))),
1258 (COPY_TO_REGCLASS VK32:$src, VK1)>;
1259 def : Pat<(i1 (X86Vextract VK64:$src, (iPTR 0))),
1260 (COPY_TO_REGCLASS VK64:$src, VK1)>;
1263 // Mask unary operation
1265 multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr,
1266 RegisterClass KRC, SDPatternOperator OpNode,
1268 let Predicates = [prd] in
1269 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
1270 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
1271 [(set KRC:$dst, (OpNode KRC:$src))]>;
1274 multiclass avx512_mask_unop_all<bits<8> opc, string OpcodeStr,
1275 SDPatternOperator OpNode> {
1276 defm B : avx512_mask_unop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
1278 defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
1279 HasAVX512>, VEX, PS;
1280 defm D : avx512_mask_unop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
1281 HasBWI>, VEX, PD, VEX_W;
1282 defm Q : avx512_mask_unop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
1283 HasBWI>, VEX, PS, VEX_W;
1286 defm KNOT : avx512_mask_unop_all<0x44, "knot", not>;
1288 multiclass avx512_mask_unop_int<string IntName, string InstName> {
1289 let Predicates = [HasAVX512] in
1290 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
1292 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
1293 (v16i1 (COPY_TO_REGCLASS GR16:$src, VK16))), GR16)>;
1295 defm : avx512_mask_unop_int<"knot", "KNOT">;
1297 let Predicates = [HasDQI] in
1298 def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)), (KNOTBrr VK8:$src1)>;
1299 let Predicates = [HasAVX512] in
1300 def : Pat<(xor VK16:$src1, (v16i1 immAllOnesV)), (KNOTWrr VK16:$src1)>;
1301 let Predicates = [HasBWI] in
1302 def : Pat<(xor VK32:$src1, (v32i1 immAllOnesV)), (KNOTDrr VK32:$src1)>;
1303 let Predicates = [HasBWI] in
1304 def : Pat<(xor VK64:$src1, (v64i1 immAllOnesV)), (KNOTQrr VK64:$src1)>;
1306 // KNL does not support KMOVB, 8-bit mask is promoted to 16-bit
1307 let Predicates = [HasAVX512] in {
1308 def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)),
1309 (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$src1, VK16)), VK8)>;
1311 def : Pat<(not VK8:$src),
1313 (KNOTWrr (COPY_TO_REGCLASS VK8:$src, VK16)), VK8)>;
1316 // Mask binary operation
1317 // - KAND, KANDN, KOR, KXNOR, KXOR
1318 multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr,
1319 RegisterClass KRC, SDPatternOperator OpNode,
1321 let Predicates = [prd] in
1322 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
1323 !strconcat(OpcodeStr,
1324 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1325 [(set KRC:$dst, (OpNode KRC:$src1, KRC:$src2))]>;
1328 multiclass avx512_mask_binop_all<bits<8> opc, string OpcodeStr,
1329 SDPatternOperator OpNode> {
1330 defm B : avx512_mask_binop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
1331 HasDQI>, VEX_4V, VEX_L, PD;
1332 defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
1333 HasAVX512>, VEX_4V, VEX_L, PS;
1334 defm D : avx512_mask_binop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
1335 HasBWI>, VEX_4V, VEX_L, VEX_W, PD;
1336 defm Q : avx512_mask_binop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
1337 HasBWI>, VEX_4V, VEX_L, VEX_W, PS;
// Helper PatFrags for the mask-register logic ops that have no single DAG
// node: andn = (~a & b), xnor = ~(a ^ b).
1340 def andn : PatFrag<(ops node:$i0, node:$i1), (and (not node:$i0), node:$i1)>;
1341 def xnor : PatFrag<(ops node:$i0, node:$i1), (not (xor node:$i0, node:$i1))>;
// Mask-register binary logic. KANDN is the only non-commutative one (its
// first operand is complemented).
1343 let isCommutable = 1 in {
1344 defm KAND : avx512_mask_binop_all<0x41, "kand", and>;
1345 defm KOR : avx512_mask_binop_all<0x45, "kor", or>;
1346 defm KXNOR : avx512_mask_binop_all<0x46, "kxnor", xnor>;
1347 defm KXOR : avx512_mask_binop_all<0x47, "kxor", xor>;
1349 let isCommutable = 0 in
1350 defm KANDN : avx512_mask_binop_all<0x42, "kandn", andn>;
// i1 (VK1) logic ops have no 1-bit K instructions: promote both operands to
// VK16, use the 16-bit K logic instruction, and narrow the result back.
1352 def : Pat<(xor VK1:$src1, VK1:$src2),
1353 (COPY_TO_REGCLASS (KXORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1354 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
1356 def : Pat<(or VK1:$src1, VK1:$src2),
1357 (COPY_TO_REGCLASS (KORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1358 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
1360 def : Pat<(and VK1:$src1, VK1:$src2),
1361 (COPY_TO_REGCLASS (KANDWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1362 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
// Map the GPR-based mask intrinsics (int_x86_avx512_<op>_w, operating on
// i16 values) onto the corresponding 16-bit K instruction: copy each GR16
// operand into VK16, run the K op, copy the result back to GR16.
1364 multiclass avx512_mask_binop_int<string IntName, string InstName> {
1365 let Predicates = [HasAVX512] in
1366 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
1367 (i16 GR16:$src1), (i16 GR16:$src2)),
1368 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
1369 (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
1370 (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
1373 defm : avx512_mask_binop_int<"kand", "KAND">;
1374 defm : avx512_mask_binop_int<"kandn", "KANDN">;
1375 defm : avx512_mask_binop_int<"kor", "KOR">;
1376 defm : avx512_mask_binop_int<"kxnor", "KXNOR">;
1377 defm : avx512_mask_binop_int<"kxor", "KXOR">;
1379 // With AVX-512, 8-bit mask is promoted to 16-bit mask.
1380 multiclass avx512_binop_pat<SDPatternOperator OpNode, Instruction Inst> {
1381 let Predicates = [HasAVX512] in
1382 def : Pat<(OpNode VK8:$src1, VK8:$src2),
1384 (Inst (COPY_TO_REGCLASS VK8:$src1, VK16),
1385 (COPY_TO_REGCLASS VK8:$src2, VK16)), VK8)>;
1388 defm : avx512_binop_pat<and, KANDWrr>;
1389 defm : avx512_binop_pat<andn, KANDNWrr>;
1390 defm : avx512_binop_pat<or, KORWrr>;
1391 defm : avx512_binop_pat<xnor, KXNORWrr>;
1392 defm : avx512_binop_pat<xor, KXORWrr>;
1395 multiclass avx512_mask_unpck<bits<8> opc, string OpcodeStr,
1396 RegisterClass KRC> {
1397 let Predicates = [HasAVX512] in
1398 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
1399 !strconcat(OpcodeStr,
1400 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1403 multiclass avx512_mask_unpck_bw<bits<8> opc, string OpcodeStr> {
1404 defm BW : avx512_mask_unpck<opc, !strconcat(OpcodeStr, "bw"), VK16>,
1408 defm KUNPCK : avx512_mask_unpck_bw<0x4b, "kunpck">;
// Build a v16i1 from two v8i1 halves with KUNPCKBW.
// NOTE(review): the operands are deliberately swapped relative to the
// concat — presumably KUNPCKBW takes the low byte of the result from its
// second source, placing $src1 in bits 0-7; confirm against the SDM.
1409 def : Pat<(v16i1 (concat_vectors (v8i1 VK8:$src1), (v8i1 VK8:$src2))),
1410 (KUNPCKBWrr (COPY_TO_REGCLASS VK8:$src2, VK16),
1411 (COPY_TO_REGCLASS VK8:$src1, VK16))>;
1414 multiclass avx512_mask_unpck_int<string IntName, string InstName> {
1415 let Predicates = [HasAVX512] in
1416 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_bw")
1417 (i16 GR16:$src1), (i16 GR16:$src2)),
1418 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"BWrr")
1419 (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
1420 (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
1422 defm : avx512_mask_unpck_int<"kunpck", "KUNPCK">;
1425 multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
1427 let Predicates = [HasAVX512], Defs = [EFLAGS] in
1428 def rr : I<opc, MRMSrcReg, (outs), (ins KRC:$src1, KRC:$src2),
1429 !strconcat(OpcodeStr, " \t{$src2, $src1|$src1, $src2}"),
1430 [(set EFLAGS, (OpNode KRC:$src1, KRC:$src2))]>;
1433 multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1434 defm W : avx512_mask_testop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
// KORTESTW: OR two mask registers and set EFLAGS (result is discarded).
1438 defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>;
// Compare an i1 against 0 by OR-testing the (VK16-widened) mask with
// itself — ZF is set iff the bit is clear.
1440 def : Pat<(X86cmp VK1:$src1, (i1 0)),
1441 (KORTESTWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1442 (COPY_TO_REGCLASS VK1:$src1, VK16))>;
1445 multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
1447 let Predicates = [HasAVX512] in
1448 def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, i8imm:$imm),
1449 !strconcat(OpcodeStr,
1450 " \t{$imm, $src, $dst|$dst, $src, $imm}"),
1451 [(set KRC:$dst, (OpNode KRC:$src, (i8 imm:$imm)))]>;
1454 multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
1456 defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
1460 defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", X86vshli>;
1461 defm KSHIFTR : avx512_mask_shiftop_w<0x30, 0x31, "kshiftr", X86vsrli>;
1463 // Mask setting all 0s or 1s
1464 multiclass avx512_mask_setop<RegisterClass KRC, ValueType VT, PatFrag Val> {
1465 let Predicates = [HasAVX512] in
1466 let isReMaterializable = 1, isAsCheapAsAMove = 1, isPseudo = 1 in
1467 def #NAME# : I<0, Pseudo, (outs KRC:$dst), (ins), "",
1468 [(set KRC:$dst, (VT Val))]>;
1471 multiclass avx512_mask_setop_w<PatFrag Val> {
1472 defm B : avx512_mask_setop<VK8, v8i1, Val>;
1473 defm W : avx512_mask_setop<VK16, v16i1, Val>;
1476 defm KSET0 : avx512_mask_setop_w<immAllZerosV>;
1477 defm KSET1 : avx512_mask_setop_w<immAllOnesV>;
1479 // With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
1480 let Predicates = [HasAVX512] in {
1481 def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
1482 def : Pat<(v8i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK8)>;
1483 def : Pat<(i1 0), (COPY_TO_REGCLASS (KSET0W), VK1)>;
1484 def : Pat<(i1 1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
1485 def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
1487 def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
1488 (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;
1490 def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
1491 (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>;
1493 def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
1494 (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
1496 def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
1497 (v8i1 (COPY_TO_REGCLASS (KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;
1499 def : Pat<(v8i1 (X86vsrli VK8:$src, (i8 imm:$imm))),
1500 (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;
1501 //===----------------------------------------------------------------------===//
1502 // AVX-512 - Aligned and unaligned load and store
// One vector-width's worth of (aligned or unaligned, per ld_frag) loads:
// plain rr/rm, merge-masked rrk/rmk (preserved lanes tied via $src0 = $dst),
// and zero-masked rrkz/rmkz.  NOTE(review): this extract drops several
// continuation lines (the embedded original numbering jumps), so some
// pattern lists below appear truncated here.
1505 multiclass avx512_load<bits<8> opc, string OpcodeStr, PatFrag ld_frag,
1506 RegisterClass KRC, RegisterClass RC,
1507 ValueType vt, ValueType zvt, X86MemOperand memop,
1508 Domain d, bit IsReMaterializable = 1> {
1509 let hasSideEffects = 0 in {
1510 def rr : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
1511 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [],
1513 def rrkz : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
1514 !strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|",
1515 "${dst} {${mask}} {z}, $src}"), [], d>, EVEX, EVEX_KZ;
1517 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable,
1518 SchedRW = [WriteLoad] in
1519 def rm : AVX512PI<opc, MRMSrcMem, (outs RC:$dst), (ins memop:$src),
1520 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
1521 [(set RC:$dst, (vt (bitconvert (ld_frag addr:$src))))],
1524 let AddedComplexity = 20 in {
// The outer `let` below already sets hasSideEffects = 0 for its whole
// scope; the nested duplicate `let hasSideEffects = 0 in` that used to
// precede `def rrk` was redundant and has been dropped.
1525 let Constraints = "$src0 = $dst", hasSideEffects = 0 in {
1527 def rrk : AVX512PI<opc, MRMSrcReg, (outs RC:$dst),
1528 (ins RC:$src0, KRC:$mask, RC:$src1),
1529 !strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
1530 "${dst} {${mask}}, $src1}"),
1531 [(set RC:$dst, (vt (vselect KRC:$mask,
1535 let mayLoad = 1, SchedRW = [WriteLoad] in
1536 def rmk : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
1537 (ins RC:$src0, KRC:$mask, memop:$src1),
1538 !strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
1539 "${dst} {${mask}}, $src1}"),
1542 (vt (bitconvert (ld_frag addr:$src1))),
1546 let mayLoad = 1, SchedRW = [WriteLoad] in
1547 def rmkz : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
1548 (ins KRC:$mask, memop:$src),
1549 !strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|",
1550 "${dst} {${mask}} {z}, $src}"),
1553 (vt (bitconvert (ld_frag addr:$src))),
1554 (vt (bitconvert (zvt immAllZerosV))))))],
// Instantiates avx512_load for the 512/256/128-bit widths, building the
// PatFrag / register-class / value-type / mem-operand names by string
// concatenation and !cast.  Z256/Z128 additionally require HasVLX.  Note
// the 256/128-bit integer variants fall back to v4i64/v2i64 load fragments
// when elty is not "f" (the !if(!eq(elty,"f"),...) below).
1559 multiclass avx512_load_vl<bits<8> opc, string OpcodeStr, string ld_pat,
1560 string elty, string elsz, string vsz512,
1561 string vsz256, string vsz128, Domain d,
1562 Predicate prd, bit IsReMaterializable = 1> {
1563 let Predicates = [prd] in
1564 defm Z : avx512_load<opc, OpcodeStr,
1565 !cast<PatFrag>(ld_pat##"v"##vsz512##elty##elsz),
1566 !cast<RegisterClass>("VK"##vsz512##"WM"), VR512,
1567 !cast<ValueType>("v"##vsz512##elty##elsz), v16i32,
1568 !cast<X86MemOperand>(elty##"512mem"), d,
1569 IsReMaterializable>, EVEX_V512;
1571 let Predicates = [prd, HasVLX] in {
1572 defm Z256 : avx512_load<opc, OpcodeStr,
1573 !cast<PatFrag>(ld_pat##!if(!eq(elty,"f"),
1574 "v"##vsz256##elty##elsz, "v4i64")),
1575 !cast<RegisterClass>("VK"##vsz256##"WM"), VR256X,
1576 !cast<ValueType>("v"##vsz256##elty##elsz), v8i32,
1577 !cast<X86MemOperand>(elty##"256mem"), d,
1578 IsReMaterializable>, EVEX_V256;
1580 defm Z128 : avx512_load<opc, OpcodeStr,
1581 !cast<PatFrag>(ld_pat##!if(!eq(elty,"f"),
1582 "v"##vsz128##elty##elsz, "v2i64")),
1583 !cast<RegisterClass>("VK"##vsz128##"WM"), VR128X,
1584 !cast<ValueType>("v"##vsz128##elty##elsz), v4i32,
1585 !cast<X86MemOperand>(elty##"128mem"), d,
1586 IsReMaterializable>, EVEX_V128;
// One vector-width's worth of stores.  The *_alt register-register forms
// are assembler/disassembler-only (isAsmParserOnly = 1, no patterns);
// the real code-gen forms are mr (plain store) and mrk (merge-masked
// store, pattern-less).
1591 multiclass avx512_store<bits<8> opc, string OpcodeStr, PatFrag st_frag,
1592 ValueType OpVT, RegisterClass KRC, RegisterClass RC,
1593 X86MemOperand memop, Domain d> {
1594 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1595 def rr_alt : AVX512PI<opc, MRMDestReg, (outs RC:$dst), (ins RC:$src),
1596 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [], d>,
1598 let Constraints = "$src1 = $dst" in
1599 def rrk_alt : AVX512PI<opc, MRMDestReg, (outs RC:$dst),
1600 (ins RC:$src1, KRC:$mask, RC:$src2),
1601 !strconcat(OpcodeStr,
1602 "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"), [], d>,
1604 def rrkz_alt : AVX512PI<opc, MRMDestReg, (outs RC:$dst),
1605 (ins KRC:$mask, RC:$src),
1606 !strconcat(OpcodeStr,
1607 "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
1608 [], d>, EVEX, EVEX_KZ;
1610 let mayStore = 1 in {
1611 def mr : AVX512PI<opc, MRMDestMem, (outs), (ins memop:$dst, RC:$src),
1612 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
1613 [(st_frag (OpVT RC:$src), addr:$dst)], d>, EVEX;
1614 def mrk : AVX512PI<opc, MRMDestMem, (outs),
1615 (ins memop:$dst, KRC:$mask, RC:$src),
1616 !strconcat(OpcodeStr,
1617 "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
1618 [], d>, EVEX, EVEX_K;
// Instantiates avx512_store for the 512/256/128-bit widths, selecting the
// store PatFrag by suffix (st_pat##st_suff_*) and building the type /
// register-class / mem-operand names with !cast.  Z256/Z128 require HasVLX.
1623 multiclass avx512_store_vl<bits<8> opc, string OpcodeStr, string st_pat,
1624 string st_suff_512, string st_suff_256,
1625 string st_suff_128, string elty, string elsz,
1626 string vsz512, string vsz256, string vsz128,
1627 Domain d, Predicate prd> {
1628 let Predicates = [prd] in
1629 defm Z : avx512_store<opc, OpcodeStr, !cast<PatFrag>(st_pat##st_suff_512),
1630 !cast<ValueType>("v"##vsz512##elty##elsz),
1631 !cast<RegisterClass>("VK"##vsz512##"WM"), VR512,
1632 !cast<X86MemOperand>(elty##"512mem"), d>, EVEX_V512;
1634 let Predicates = [prd, HasVLX] in {
1635 defm Z256 : avx512_store<opc, OpcodeStr, !cast<PatFrag>(st_pat##st_suff_256),
1636 !cast<ValueType>("v"##vsz256##elty##elsz),
1637 !cast<RegisterClass>("VK"##vsz256##"WM"), VR256X,
1638 !cast<X86MemOperand>(elty##"256mem"), d>, EVEX_V256;
1640 defm Z128 : avx512_store<opc, OpcodeStr, !cast<PatFrag>(st_pat##st_suff_128),
1641 !cast<ValueType>("v"##vsz128##elty##elsz),
1642 !cast<RegisterClass>("VK"##vsz128##"WM"), VR128X,
1643 !cast<X86MemOperand>(elty##"128mem"), d>, EVEX_V128;
// FP aligned (VMOVAPS/VMOVAPD) and unaligned (VMOVUPS/VMOVUPD) moves,
// each combining the load and store multiclasses.  VMOVUPD passes
// IsReMaterializable = 0 for its load side.
1647 defm VMOVAPS : avx512_load_vl<0x28, "vmovaps", "alignedload", "f", "32",
1648 "16", "8", "4", SSEPackedSingle, HasAVX512>,
1649 avx512_store_vl<0x29, "vmovaps", "alignedstore",
1650 "512", "256", "", "f", "32", "16", "8", "4",
1651 SSEPackedSingle, HasAVX512>,
1652 PS, EVEX_CD8<32, CD8VF>;
1654 defm VMOVAPD : avx512_load_vl<0x28, "vmovapd", "alignedload", "f", "64",
1655 "8", "4", "2", SSEPackedDouble, HasAVX512>,
1656 avx512_store_vl<0x29, "vmovapd", "alignedstore",
1657 "512", "256", "", "f", "64", "8", "4", "2",
1658 SSEPackedDouble, HasAVX512>,
1659 PD, VEX_W, EVEX_CD8<64, CD8VF>;
1661 defm VMOVUPS : avx512_load_vl<0x10, "vmovups", "load", "f", "32",
1662 "16", "8", "4", SSEPackedSingle, HasAVX512>,
1663 avx512_store_vl<0x11, "vmovups", "store", "", "", "", "f", "32",
1664 "16", "8", "4", SSEPackedSingle, HasAVX512>,
1665 PS, EVEX_CD8<32, CD8VF>;
1667 defm VMOVUPD : avx512_load_vl<0x10, "vmovupd", "load", "f", "64",
1668 "8", "4", "2", SSEPackedDouble, HasAVX512, 0>,
1669 avx512_store_vl<0x11, "vmovupd", "store", "", "", "", "f", "64",
1670 "8", "4", "2", SSEPackedDouble, HasAVX512>,
1671 PD, VEX_W, EVEX_CD8<64, CD8VF>;
// Map the masked unaligned FP load/store intrinsics to the zero-masked
// load (Zrmkz) and masked store (Zmrk) forms; the GR8/GR16 mask operand is
// moved into a mask register with COPY_TO_REGCLASS first.  Some pattern
// continuation lines are missing from this extract.
1673 def: Pat<(v8f64 (int_x86_avx512_mask_loadu_pd_512 addr:$ptr,
1674 (bc_v8f64 (v16i32 immAllZerosV)), GR8:$mask)),
1675 (VMOVUPDZrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
1677 def: Pat<(v16f32 (int_x86_avx512_mask_loadu_ps_512 addr:$ptr,
1678 (bc_v16f32 (v16i32 immAllZerosV)), GR16:$mask)),
1679 (VMOVUPSZrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
1681 def: Pat<(int_x86_avx512_mask_storeu_ps_512 addr:$ptr, (v16f32 VR512:$src),
1683 (VMOVUPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
1685 def: Pat<(int_x86_avx512_mask_storeu_pd_512 addr:$ptr, (v8f64 VR512:$src),
1687 (VMOVUPDZmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
// Integer aligned (VMOVDQA32/64) and unaligned (VMOVDQU8/16/32/64) moves.
// The 8/16-bit element forms require HasBWI; the rest require HasAVX512.
1690 defm VMOVDQA32 : avx512_load_vl<0x6F, "vmovdqa32", "alignedload", "i", "32",
1691 "16", "8", "4", SSEPackedInt, HasAVX512>,
1692 avx512_store_vl<0x7F, "vmovdqa32", "alignedstore",
1693 "512", "256", "", "i", "32", "16", "8", "4",
1694 SSEPackedInt, HasAVX512>,
1695 PD, EVEX_CD8<32, CD8VF>;
1697 defm VMOVDQA64 : avx512_load_vl<0x6F, "vmovdqa64", "alignedload", "i", "64",
1698 "8", "4", "2", SSEPackedInt, HasAVX512>,
1699 avx512_store_vl<0x7F, "vmovdqa64", "alignedstore",
1700 "512", "256", "", "i", "64", "8", "4", "2",
1701 SSEPackedInt, HasAVX512>,
1702 PD, VEX_W, EVEX_CD8<64, CD8VF>;
1704 defm VMOVDQU8 : avx512_load_vl<0x6F, "vmovdqu8", "load", "i", "8",
1705 "64", "32", "16", SSEPackedInt, HasBWI>,
1706 avx512_store_vl<0x7F, "vmovdqu8", "store", "", "", "",
1707 "i", "8", "64", "32", "16", SSEPackedInt,
1708 HasBWI>, XD, EVEX_CD8<8, CD8VF>;
1710 defm VMOVDQU16 : avx512_load_vl<0x6F, "vmovdqu16", "load", "i", "16",
1711 "32", "16", "8", SSEPackedInt, HasBWI>,
1712 avx512_store_vl<0x7F, "vmovdqu16", "store", "", "", "",
1713 "i", "16", "32", "16", "8", SSEPackedInt,
1714 HasBWI>, XD, VEX_W, EVEX_CD8<16, CD8VF>;
1716 defm VMOVDQU32 : avx512_load_vl<0x6F, "vmovdqu32", "load", "i", "32",
1717 "16", "8", "4", SSEPackedInt, HasAVX512>,
1718 avx512_store_vl<0x7F, "vmovdqu32", "store", "", "", "",
1719 "i", "32", "16", "8", "4", SSEPackedInt,
1720 HasAVX512>, XS, EVEX_CD8<32, CD8VF>;
1722 defm VMOVDQU64 : avx512_load_vl<0x6F, "vmovdqu64", "load", "i", "64",
1723 "8", "4", "2", SSEPackedInt, HasAVX512>,
1724 avx512_store_vl<0x7F, "vmovdqu64", "store", "", "", "",
1725 "i", "64", "8", "4", "2", SSEPackedInt,
1726 HasAVX512>, XS, VEX_W, EVEX_CD8<64, CD8VF>;
// Same mapping as the FP versions above, for the masked integer unaligned
// load/store intrinsics.  Some pattern continuation lines are missing from
// this extract.
1728 def: Pat<(v16i32 (int_x86_avx512_mask_loadu_d_512 addr:$ptr,
1729 (v16i32 immAllZerosV), GR16:$mask)),
1730 (VMOVDQU32Zrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
1732 def: Pat<(v8i64 (int_x86_avx512_mask_loadu_q_512 addr:$ptr,
1733 (bc_v8i64 (v16i32 immAllZerosV)), GR8:$mask)),
1734 (VMOVDQU64Zrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
1736 def: Pat<(int_x86_avx512_mask_storeu_d_512 addr:$ptr, (v16i32 VR512:$src),
1738 (VMOVDQU32Zmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
1740 def: Pat<(int_x86_avx512_mask_storeu_q_512 addr:$ptr, (v8i64 VR512:$src),
1742 (VMOVDQU64Zmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
// Fold (vselect mask, src, 0) into the zero-masked register move Zrrkz.
// The inverted form (zero in the true arm) is handled by negating the mask
// with KNOTWrr first.
1745 let AddedComplexity = 20 in {
1746 def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src),
1747 (bc_v8i64 (v16i32 immAllZerosV)))),
1748 (VMOVDQU64Zrrkz VK8WM:$mask, VR512:$src)>;
1750 def : Pat<(v8i64 (vselect VK8WM:$mask, (bc_v8i64 (v16i32 immAllZerosV)),
1751 (v8i64 VR512:$src))),
1752 (VMOVDQU64Zrrkz (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$mask, VK16)),
1755 def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 VR512:$src),
1756 (v16i32 immAllZerosV))),
1757 (VMOVDQU32Zrrkz VK16WM:$mask, VR512:$src)>;
1759 def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 immAllZerosV),
1760 (v16i32 VR512:$src))),
1761 (VMOVDQU32Zrrkz (KNOTWrr VK16WM:$mask), VR512:$src)>;
1764 // Move Int Doubleword to Packed Double Int
// GPR -> XMM moves: vmovd for 32-bit (register and memory sources) and
// vmovq for 64-bit, plus the isCodeGenOnly GPR<->FR64 bitcast moves used
// when scalars live in vector registers.
1766 def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
1767 "vmovd\t{$src, $dst|$dst, $src}",
1769 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
1771 def VMOVDI2PDIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src),
1772 "vmovd\t{$src, $dst|$dst, $src}",
1774 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
1775 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
1776 def VMOV64toPQIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$src),
1777 "vmovq\t{$src, $dst|$dst, $src}",
1779 (v2i64 (scalar_to_vector GR64:$src)))],
1780 IIC_SSE_MOVDQ>, EVEX, VEX_W, VEX_LIG;
1781 let isCodeGenOnly = 1 in {
1782 def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
1783 "vmovq\t{$src, $dst|$dst, $src}",
1784 [(set FR64:$dst, (bitconvert GR64:$src))],
1785 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
1786 def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
1787 "vmovq\t{$src, $dst|$dst, $src}",
1788 [(set GR64:$dst, (bitconvert FR64:$src))],
1789 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
1791 def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
1792 "vmovq\t{$src, $dst|$dst, $src}",
1793 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
1794 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteStore]>,
1795 EVEX_CD8<64, CD8VT1>;
1797 // Move Int Doubleword to Single Scalar
// isCodeGenOnly bitcast moves GR32 -> FR32X (register and memory sources).
1799 let isCodeGenOnly = 1 in {
1800 def VMOVDI2SSZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src),
1801 "vmovd\t{$src, $dst|$dst, $src}",
1802 [(set FR32X:$dst, (bitconvert GR32:$src))],
1803 IIC_SSE_MOVDQ>, EVEX, VEX_LIG;
1805 def VMOVDI2SSZrm : AVX512BI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src),
1806 "vmovd\t{$src, $dst|$dst, $src}",
1807 [(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))],
1808 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
1811 // Move doubleword from xmm register to r/m32
// Extract element 0 of a v4i32 to a GPR (rr) or store it to memory (mr).
1813 def VMOVPDI2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128X:$src),
1814 "vmovd\t{$src, $dst|$dst, $src}",
1815 [(set GR32:$dst, (vector_extract (v4i32 VR128X:$src),
1816 (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
1818 def VMOVPDI2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
1819 (ins i32mem:$dst, VR128X:$src),
1820 "vmovd\t{$src, $dst|$dst, $src}",
1821 [(store (i32 (vector_extract (v4i32 VR128X:$src),
1822 (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
1823 EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
1825 // Move quadword from xmm1 register to r/m64
// Extract element 0 of a v2i64 to a 64-bit GPR or store it; both require
// 64-bit mode (Requires<[HasAVX512, In64BitMode]>).
1827 def VMOVPQIto64Zrr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128X:$src),
1828 "vmovq\t{$src, $dst|$dst, $src}",
1829 [(set GR64:$dst, (extractelt (v2i64 VR128X:$src),
1831 IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_LIG, VEX_W,
1832 Requires<[HasAVX512, In64BitMode]>;
1834 def VMOVPQIto64Zmr : I<0xD6, MRMDestMem, (outs),
1835 (ins i64mem:$dst, VR128X:$src),
1836 "vmovq\t{$src, $dst|$dst, $src}",
1837 [(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
1838 addr:$dst)], IIC_SSE_MOVDQ>,
1839 EVEX, PD, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>,
1840 Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>;
1842 // Move Scalar Single to Double Int
// isCodeGenOnly bitcast moves FR32X -> GR32 / memory, followed by the
// memory-source vmovq load into a packed quadword register.
1844 let isCodeGenOnly = 1 in {
1845 def VMOVSS2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst),
1847 "vmovd\t{$src, $dst|$dst, $src}",
1848 [(set GR32:$dst, (bitconvert FR32X:$src))],
1849 IIC_SSE_MOVD_ToGP>, EVEX, VEX_LIG;
1850 def VMOVSS2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
1851 (ins i32mem:$dst, FR32X:$src),
1852 "vmovd\t{$src, $dst|$dst, $src}",
1853 [(store (i32 (bitconvert FR32X:$src)), addr:$dst)],
1854 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
1857 // Move Quadword Int to Packed Quadword Int
1859 def VMOVQI2PQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
1861 "vmovq\t{$src, $dst|$dst, $src}",
1863 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
1864 EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
1866 //===----------------------------------------------------------------------===//
1867 // AVX-512 MOVSS, MOVSD
1868 //===----------------------------------------------------------------------===//
// Scalar move: rr merges the scalar into the low element of $src1; rrk is
// the masked, pattern-less variant with $src1 tied to $dst; rm/mr are the
// plain scalar load/store.  All forms are hasSideEffects = 0.
1870 multiclass avx512_move_scalar <string asm, RegisterClass RC,
1871 SDNode OpNode, ValueType vt,
1872 X86MemOperand x86memop, PatFrag mem_pat> {
1873 let hasSideEffects = 0 in {
1874 def rr : SI<0x10, MRMSrcReg, (outs VR128X:$dst), (ins VR128X:$src1, RC:$src2),
1875 !strconcat(asm, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1876 [(set VR128X:$dst, (vt (OpNode VR128X:$src1,
1877 (scalar_to_vector RC:$src2))))],
1878 IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG;
1879 let Constraints = "$src1 = $dst" in
1880 def rrk : SI<0x10, MRMSrcReg, (outs VR128X:$dst),
1881 (ins VR128X:$src1, VK1WM:$mask, RC:$src2, RC:$src3),
1883 " \t{$src3, $src2, $dst {${mask}}|$dst {${mask}}, $src2, $src3}"),
1884 [], IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG, EVEX_K;
1885 def rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
1886 !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
1887 [(set RC:$dst, (mem_pat addr:$src))], IIC_SSE_MOV_S_RM>,
1889 def mr: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
1890 !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
1891 [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
1893 } //hasSideEffects = 0
// VMOVSS/VMOVSD instantiations, plus patterns that implement scalar
// X86select via the masked rrk form (operands shuttled through VR128X with
// COPY_TO_REGCLASS; the tied $src1 input is IMPLICIT_DEF).
1896 let ExeDomain = SSEPackedSingle in
1897 defm VMOVSSZ : avx512_move_scalar<"movss", FR32X, X86Movss, v4f32, f32mem,
1898 loadf32>, XS, EVEX_CD8<32, CD8VT1>;
1900 let ExeDomain = SSEPackedDouble in
1901 defm VMOVSDZ : avx512_move_scalar<"movsd", FR64X, X86Movsd, v2f64, f64mem,
1902 loadf64>, XD, VEX_W, EVEX_CD8<64, CD8VT1>;
1904 def : Pat<(f32 (X86select VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
1905 (COPY_TO_REGCLASS (VMOVSSZrrk (COPY_TO_REGCLASS FR32X:$src2, VR128X),
1906 VK1WM:$mask, (f32 (IMPLICIT_DEF)), FR32X:$src1), FR32X)>;
1908 def : Pat<(f64 (X86select VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
1909 (COPY_TO_REGCLASS (VMOVSDZrrk (COPY_TO_REGCLASS FR64X:$src2, VR128X),
1910 VK1WM:$mask, (f64 (IMPLICIT_DEF)), FR64X:$src1), FR64X)>;
1912 // For the disassembler
// Reversed-encoding (MRMDestReg) register forms, pattern-less and
// isCodeGenOnly, so the disassembler can round-trip both encodings.
1913 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
1914 def VMOVSSZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
1915 (ins VR128X:$src1, FR32X:$src2),
1916 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
1918 XS, EVEX_4V, VEX_LIG;
1919 def VMOVSDZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
1920 (ins VR128X:$src1, FR64X:$src2),
1921 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
1923 XD, EVEX_4V, VEX_LIG, VEX_W;
// Pattern block mirroring the SSE/AVX MOVSS/MOVSD selection rules for the
// AVX-512 (Z-suffixed) instructions: zero-extending scalar moves via
// V_SET0 + VMOVS*Zrr, load-and-zero-extend via VMOVS*Zrm wrapped in
// COPY_TO_REGCLASS / SUBREG_TO_REG, extract-and-store, and the shuffle
// forms of X86Movss/X86Movsd/X86Movlps/X86Movlpd.  Some continuation
// lines are missing from this extract.
1926 let Predicates = [HasAVX512] in {
1927 let AddedComplexity = 15 in {
1928 // Move scalar to XMM zero-extended, zeroing a VR128X then do a
1929 // MOVS{S,D} to the lower bits.
1930 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32X:$src)))),
1931 (VMOVSSZrr (v4f32 (V_SET0)), FR32X:$src)>;
1932 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128X:$src))),
1933 (VMOVSSZrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
1934 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128X:$src))),
1935 (VMOVSSZrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
1936 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64X:$src)))),
1937 (VMOVSDZrr (v2f64 (V_SET0)), FR64X:$src)>;
1939 // Move low f32 and clear high bits.
1940 def : Pat<(v8f32 (X86vzmovl (v8f32 VR256X:$src))),
1941 (SUBREG_TO_REG (i32 0),
1942 (VMOVSSZrr (v4f32 (V_SET0)),
1943 (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)), sub_xmm)>;
1944 def : Pat<(v8i32 (X86vzmovl (v8i32 VR256X:$src))),
1945 (SUBREG_TO_REG (i32 0),
1946 (VMOVSSZrr (v4i32 (V_SET0)),
1947 (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)), sub_xmm)>;
1950 let AddedComplexity = 20 in {
1951 // MOVSSrm zeros the high parts of the register; represent this
1952 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
1953 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
1954 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
1955 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
1956 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
1957 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
1958 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
1960 // MOVSDrm zeros the high parts of the register; represent this
1961 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
1962 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
1963 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
1964 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
1965 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
1966 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
1967 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
1968 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
1969 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
1970 def : Pat<(v2f64 (X86vzload addr:$src)),
1971 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
1973 // Represent the same patterns above but in the form they appear for
1975 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
1976 (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
1977 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
1978 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
1979 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
1980 (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
1981 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
1982 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
1983 (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
1985 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
1986 (v4f32 (scalar_to_vector FR32X:$src)), (iPTR 0)))),
1987 (SUBREG_TO_REG (i32 0), (v4f32 (VMOVSSZrr (v4f32 (V_SET0)),
1988 FR32X:$src)), sub_xmm)>;
1989 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
1990 (v2f64 (scalar_to_vector FR64X:$src)), (iPTR 0)))),
1991 (SUBREG_TO_REG (i64 0), (v2f64 (VMOVSDZrr (v2f64 (V_SET0)),
1992 FR64X:$src)), sub_xmm)>;
1993 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
1994 (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
1995 (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;
1997 // Move low f64 and clear high bits.
1998 def : Pat<(v4f64 (X86vzmovl (v4f64 VR256X:$src))),
1999 (SUBREG_TO_REG (i32 0),
2000 (VMOVSDZrr (v2f64 (V_SET0)),
2001 (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)), sub_xmm)>;
2003 def : Pat<(v4i64 (X86vzmovl (v4i64 VR256X:$src))),
2004 (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (V_SET0)),
2005 (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)), sub_xmm)>;
2007 // Extract and store.
2008 def : Pat<(store (f32 (vector_extract (v4f32 VR128X:$src), (iPTR 0))),
2010 (VMOVSSZmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128X:$src), FR32X))>;
2011 def : Pat<(store (f64 (vector_extract (v2f64 VR128X:$src), (iPTR 0))),
2013 (VMOVSDZmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128X:$src), FR64X))>;
2015 // Shuffle with VMOVSS
2016 def : Pat<(v4i32 (X86Movss VR128X:$src1, VR128X:$src2)),
2017 (VMOVSSZrr (v4i32 VR128X:$src1),
2018 (COPY_TO_REGCLASS (v4i32 VR128X:$src2), FR32X))>;
2019 def : Pat<(v4f32 (X86Movss VR128X:$src1, VR128X:$src2)),
2020 (VMOVSSZrr (v4f32 VR128X:$src1),
2021 (COPY_TO_REGCLASS (v4f32 VR128X:$src2), FR32X))>;
2024 def : Pat<(v8i32 (X86Movss VR256X:$src1, VR256X:$src2)),
2025 (SUBREG_TO_REG (i32 0),
2026 (VMOVSSZrr (EXTRACT_SUBREG (v8i32 VR256X:$src1), sub_xmm),
2027 (EXTRACT_SUBREG (v8i32 VR256X:$src2), sub_xmm)),
2029 def : Pat<(v8f32 (X86Movss VR256X:$src1, VR256X:$src2)),
2030 (SUBREG_TO_REG (i32 0),
2031 (VMOVSSZrr (EXTRACT_SUBREG (v8f32 VR256X:$src1), sub_xmm),
2032 (EXTRACT_SUBREG (v8f32 VR256X:$src2), sub_xmm)),
2035 // Shuffle with VMOVSD
2036 def : Pat<(v2i64 (X86Movsd VR128X:$src1, VR128X:$src2)),
2037 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2038 def : Pat<(v2f64 (X86Movsd VR128X:$src1, VR128X:$src2)),
2039 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2040 def : Pat<(v4f32 (X86Movsd VR128X:$src1, VR128X:$src2)),
2041 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2042 def : Pat<(v4i32 (X86Movsd VR128X:$src1, VR128X:$src2)),
2043 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2046 def : Pat<(v4i64 (X86Movsd VR256X:$src1, VR256X:$src2)),
2047 (SUBREG_TO_REG (i32 0),
2048 (VMOVSDZrr (EXTRACT_SUBREG (v4i64 VR256X:$src1), sub_xmm),
2049 (EXTRACT_SUBREG (v4i64 VR256X:$src2), sub_xmm)),
2051 def : Pat<(v4f64 (X86Movsd VR256X:$src1, VR256X:$src2)),
2052 (SUBREG_TO_REG (i32 0),
2053 (VMOVSDZrr (EXTRACT_SUBREG (v4f64 VR256X:$src1), sub_xmm),
2054 (EXTRACT_SUBREG (v4f64 VR256X:$src2), sub_xmm)),
2057 def : Pat<(v2f64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
2058 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2059 def : Pat<(v2i64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
2060 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2061 def : Pat<(v4f32 (X86Movlps VR128X:$src1, VR128X:$src2)),
2062 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2063 def : Pat<(v4i32 (X86Movlps VR128X:$src1, VR128X:$src2)),
2064 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
// vmovq zero-extending moves of the low quadword, register and memory
// sources.  NOTE(review): the rm form carries EVEX_CD8<8, CD8VT8> while
// the other 64-bit moves here use EVEX_CD8<64, CD8VT1> — looks inconsistent;
// verify against the EVEX disp8 compression rules before changing.
2067 let AddedComplexity = 15 in
2068 def VMOVZPQILo2PQIZrr : AVX512XSI<0x7E, MRMSrcReg, (outs VR128X:$dst),
2070 "vmovq\t{$src, $dst|$dst, $src}",
2071 [(set VR128X:$dst, (v2i64 (X86vzmovl
2072 (v2i64 VR128X:$src))))],
2073 IIC_SSE_MOVQ_RR>, EVEX, VEX_W;
2075 let AddedComplexity = 20 in
2076 def VMOVZPQILo2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
2078 "vmovq\t{$src, $dst|$dst, $src}",
2079 [(set VR128X:$dst, (v2i64 (X86vzmovl
2080 (loadv2i64 addr:$src))))],
2081 IIC_SSE_MOVDQ>, EVEX, VEX_W,
2082 EVEX_CD8<8, CD8VT8>;
// Zero-extending scalar-to-vector patterns selected onto the 128-bit
// movd/movq instructions above, including the 256/512-bit insert_subvector
// forms (SUBREG_TO_REG implies the upper bits are zeroed).
2084 let Predicates = [HasAVX512] in {
2085 // AVX 128-bit movd/movq instruction write zeros in the high 128-bit part.
2086 let AddedComplexity = 20 in {
2087 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
2088 (VMOVDI2PDIZrm addr:$src)>;
2089 def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
2090 (VMOV64toPQIZrr GR64:$src)>;
2091 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
2092 (VMOVDI2PDIZrr GR32:$src)>;
2094 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
2095 (VMOVDI2PDIZrm addr:$src)>;
2096 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
2097 (VMOVDI2PDIZrm addr:$src)>;
2098 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
2099 (VMOVZPQILo2PQIZrm addr:$src)>;
2100 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128X:$src))),
2101 (VMOVZPQILo2PQIZrr VR128X:$src)>;
2102 def : Pat<(v2i64 (X86vzload addr:$src)),
2103 (VMOVZPQILo2PQIZrm addr:$src)>;
2106 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
2107 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
2108 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
2109 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>;
2110 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
2111 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
2112 (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
2115 def : Pat<(v16i32 (X86Vinsert (v16i32 immAllZerosV), GR32:$src2, (iPTR 0))),
2116 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;
2118 def : Pat<(v8i64 (X86Vinsert (bc_v8i64 (v16i32 immAllZerosV)), GR64:$src2, (iPTR 0))),
2119 (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
2121 def : Pat<(v16i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
2122 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;
2124 def : Pat<(v8i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
2125 (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
2127 //===----------------------------------------------------------------------===//
2128 // AVX-512 - Non-temporals
2129 //===----------------------------------------------------------------------===//
// Non-temporal loads.  Only the 512-bit form has a pattern (the movntdqa
// intrinsic); the VLX 256/128-bit forms are pattern-less.
2130 let SchedRW = [WriteLoad] in {
2131 def VMOVNTDQAZrm : AVX512PI<0x2A, MRMSrcMem, (outs VR512:$dst),
2132 (ins i512mem:$src), "vmovntdqa\t{$src, $dst|$dst, $src}",
2133 [(set VR512:$dst, (int_x86_avx512_movntdqa addr:$src))],
2134 SSEPackedInt>, EVEX, T8PD, EVEX_V512,
2135 EVEX_CD8<64, CD8VF>;
2137 let Predicates = [HasAVX512, HasVLX] in {
2138 def VMOVNTDQAZ256rm : AVX512PI<0x2A, MRMSrcMem, (outs VR256X:$dst),
2140 "vmovntdqa\t{$src, $dst|$dst, $src}", [],
2141 SSEPackedInt>, EVEX, T8PD, EVEX_V256,
2142 EVEX_CD8<64, CD8VF>;
2144 def VMOVNTDQAZ128rm : AVX512PI<0x2A, MRMSrcMem, (outs VR128X:$dst),
2146 "vmovntdqa\t{$src, $dst|$dst, $src}", [],
2147 SSEPackedInt>, EVEX, T8PD, EVEX_V128,
2148 EVEX_CD8<64, CD8VF>;
// One non-temporal store instruction; AddedComplexity = 400 so the
// non-temporal PatFrag wins over the regular store patterns.
2152 multiclass avx512_movnt<bits<8> opc, string OpcodeStr, PatFrag st_frag,
2153 ValueType OpVT, RegisterClass RC, X86MemOperand memop,
2154 Domain d, InstrItinClass itin = IIC_SSE_MOVNT> {
2155 let SchedRW = [WriteStore], mayStore = 1,
2156 AddedComplexity = 400 in
2157 def mr : AVX512PI<opc, MRMDestMem, (outs), (ins memop:$dst, RC:$src),
2158 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2159 [(st_frag (OpVT RC:$src), addr:$dst)], d, itin>, EVEX;
// Instantiates avx512_movnt for the 512/256/128-bit widths (256/128 gated
// on HasVLX), building type and mem-operand names with !cast.
2162 multiclass avx512_movnt_vl<bits<8> opc, string OpcodeStr, PatFrag st_frag,
2163 string elty, string elsz, string vsz512,
2164 string vsz256, string vsz128, Domain d,
2165 Predicate prd, InstrItinClass itin = IIC_SSE_MOVNT> {
2166 let Predicates = [prd] in
2167 defm Z : avx512_movnt<opc, OpcodeStr, st_frag,
2168 !cast<ValueType>("v"##vsz512##elty##elsz), VR512,
2169 !cast<X86MemOperand>(elty##"512mem"), d, itin>,
2172 let Predicates = [prd, HasVLX] in {
2173 defm Z256 : avx512_movnt<opc, OpcodeStr, st_frag,
2174 !cast<ValueType>("v"##vsz256##elty##elsz), VR256X,
2175 !cast<X86MemOperand>(elty##"256mem"), d, itin>,
2178 defm Z128 : avx512_movnt<opc, OpcodeStr, st_frag,
2179 !cast<ValueType>("v"##vsz128##elty##elsz), VR128X,
2180 !cast<X86MemOperand>(elty##"128mem"), d, itin>,
// Non-temporal store instantiations: integer (VMOVNTDQ) and FP
// (VMOVNTPD/VMOVNTPS), all using aligned non-temporal store fragments.
2185 defm VMOVNTDQ : avx512_movnt_vl<0xE7, "vmovntdq", alignednontemporalstore,
2186 "i", "64", "8", "4", "2", SSEPackedInt,
2187 HasAVX512>, PD, EVEX_CD8<64, CD8VF>;
2189 defm VMOVNTPD : avx512_movnt_vl<0x2B, "vmovntpd", alignednontemporalstore,
2190 "f", "64", "8", "4", "2", SSEPackedDouble,
2191 HasAVX512>, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2193 defm VMOVNTPS : avx512_movnt_vl<0x2B, "vmovntps", alignednontemporalstore,
2194 "f", "32", "16", "8", "4", SSEPackedSingle,
2195 HasAVX512>, PS, EVEX_CD8<32, CD8VF>;
2197 //===----------------------------------------------------------------------===//
2198 // AVX-512 - Integer arithmetic
// Full set of forms for a binary integer op: register (rr), memory (rm),
// and broadcast-from-scalar-memory (rmb), each with merge-masked (*k, $src0
// tied to $dst, AddedComplexity = 30 to beat the plain form) and
// zero-masked (*kz) variants.  Some pattern continuation lines are missing
// from this extract.
2200 multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2201 ValueType OpVT, RegisterClass KRC,
2202 RegisterClass RC, PatFrag memop_frag,
2203 X86MemOperand x86memop, PatFrag scalar_mfrag,
2204 X86MemOperand x86scalar_mop, string BrdcstStr,
2205 OpndItins itins, bit IsCommutable = 0> {
2206 let isCommutable = IsCommutable in
2207 def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2208 (ins RC:$src1, RC:$src2),
2209 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2210 [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
2212 let AddedComplexity = 30 in {
2213 let Constraints = "$src0 = $dst" in
2214 def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2215 (ins RC:$src0, KRC:$mask, RC:$src1, RC:$src2),
2216 !strconcat(OpcodeStr,
2217 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2218 [(set RC:$dst, (OpVT (vselect KRC:$mask,
2219 (OpNode (OpVT RC:$src1), (OpVT RC:$src2)),
2221 itins.rr>, EVEX_4V, EVEX_K;
2222 def rrkz : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2223 (ins KRC:$mask, RC:$src1, RC:$src2),
2224 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst {${mask}} {z}" ,
2225 "|$dst {${mask}} {z}, $src1, $src2}"),
2226 [(set RC:$dst, (OpVT (vselect KRC:$mask,
2227 (OpNode (OpVT RC:$src1), (OpVT RC:$src2)),
2228 (OpVT immAllZerosV))))],
2229 itins.rr>, EVEX_4V, EVEX_KZ;
2232 let mayLoad = 1 in {
2233 def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2234 (ins RC:$src1, x86memop:$src2),
2235 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2236 [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (memop_frag addr:$src2))))],
2238 let AddedComplexity = 30 in {
2239 let Constraints = "$src0 = $dst" in
2240 def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2241 (ins RC:$src0, KRC:$mask, RC:$src1, x86memop:$src2),
2242 !strconcat(OpcodeStr,
2243 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2244 [(set RC:$dst, (OpVT (vselect KRC:$mask,
2245 (OpNode (OpVT RC:$src1), (memop_frag addr:$src2)),
2247 itins.rm>, EVEX_4V, EVEX_K;
2248 def rmkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2249 (ins KRC:$mask, RC:$src1, x86memop:$src2),
2250 !strconcat(OpcodeStr,
2251 " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
2252 [(set RC:$dst, (OpVT (vselect KRC:$mask,
2253 (OpNode (OpVT RC:$src1), (memop_frag addr:$src2)),
2254 (OpVT immAllZerosV))))],
2255 itins.rm>, EVEX_4V, EVEX_KZ;
2257 def rmb : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2258 (ins RC:$src1, x86scalar_mop:$src2),
2259 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2260 ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
2261 [(set RC:$dst, (OpNode RC:$src1,
2262 (OpVT (X86VBroadcast (scalar_mfrag addr:$src2)))))],
2263 itins.rm>, EVEX_4V, EVEX_B;
2264 let AddedComplexity = 30 in {
2265 let Constraints = "$src0 = $dst" in
2266 def rmbk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2267 (ins RC:$src0, KRC:$mask, RC:$src1, x86scalar_mop:$src2),
2268 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2269 ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}",
2271 [(set RC:$dst, (OpVT (vselect KRC:$mask,
2272 (OpNode (OpVT RC:$src1),
2273 (OpVT (X86VBroadcast (scalar_mfrag addr:$src2)))),
2275 itins.rm>, EVEX_4V, EVEX_B, EVEX_K;
2276 def rmbkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2277 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
2278 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2279 ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
2281 [(set RC:$dst, (OpVT (vselect KRC:$mask,
2282 (OpNode (OpVT RC:$src1),
2283 (OpVT (X86VBroadcast (scalar_mfrag addr:$src2)))),
2284 (OpVT immAllZerosV))))],
2285 itins.rm>, EVEX_4V, EVEX_B, EVEX_KZ;
// Integer binary op whose destination element type differs from the source
// element type (DstVT vs. SrcVT) -- used for vpmuldq/vpmuludq, which read
// v16i32 sources and produce a v8i64 result. Emits the full EVEX variant
// matrix: reg-reg (rr), reg-mem (rm), broadcast (rmb), each with merge-masked
// (..k, EVEX_K) and zero-masked (..kz, EVEX_KZ) forms. All masked/broadcast
// variants carry empty ISel patterns [] here; selection for them is handled
// elsewhere (e.g. via intrinsic Pat<>s).
2290 multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, ValueType DstVT,
2291 ValueType SrcVT, RegisterClass KRC, RegisterClass RC,
2292 PatFrag memop_frag, X86MemOperand x86memop,
2293 PatFrag scalar_mfrag, X86MemOperand x86scalar_mop,
2294 string BrdcstStr, OpndItins itins, bit IsCommutable = 0> {
// Register-register forms. Only the unmasked rr def may be commuted.
2295 let isCommutable = IsCommutable in
2297 def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2298 (ins RC:$src1, RC:$src2),
2299 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2301 def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2302 (ins KRC:$mask, RC:$src1, RC:$src2),
2303 !strconcat(OpcodeStr,
2304 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2305 [], itins.rr>, EVEX_4V, EVEX_K;
2306 def rrkz : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2307 (ins KRC:$mask, RC:$src1, RC:$src2),
2308 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst {${mask}} {z}" ,
2309 "|$dst {${mask}} {z}, $src1, $src2}"),
2310 [], itins.rr>, EVEX_4V, EVEX_KZ;
// Memory-operand forms (full-width load and element broadcast).
2312 let mayLoad = 1 in {
2313 def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2314 (ins RC:$src1, x86memop:$src2),
2315 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2317 def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2318 (ins KRC:$mask, RC:$src1, x86memop:$src2),
2319 !strconcat(OpcodeStr,
2320 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2321 [], itins.rm>, EVEX_4V, EVEX_K;
2322 def rmkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2323 (ins KRC:$mask, RC:$src1, x86memop:$src2),
2324 !strconcat(OpcodeStr,
2325 " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
2326 [], itins.rm>, EVEX_4V, EVEX_KZ;
// Broadcast forms: a single scalar memory element replicated across the
// vector, spelled with the BrdcstStr suffix (e.g. "{1to8}") and EVEX.b.
2327 def rmb : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2328 (ins RC:$src1, x86scalar_mop:$src2),
2329 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2330 ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
2331 [], itins.rm>, EVEX_4V, EVEX_B;
2332 def rmbk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2333 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
2334 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2335 ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}",
2337 [], itins.rm>, EVEX_4V, EVEX_B, EVEX_K;
2338 def rmbkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2339 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
2340 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2341 ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
2343 [], itins.rm>, EVEX_4V, EVEX_B, EVEX_KZ;
// 512-bit integer add/sub/mul instantiations. The D forms operate on
// v16i32 with VK16WM write-masks and {1to16} broadcast; the Q forms on
// v8i64 with VK8WM and {1to8} (plus VEX_W for the 64-bit element size).
// Subtraction is non-commutable (last template arg 0).
2347 defm VPADDDZ : avx512_binop_rm<0xFE, "vpaddd", add, v16i32, VK16WM, VR512,
2348 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2349 SSE_INTALU_ITINS_P, 1>, EVEX_V512, EVEX_CD8<32, CD8VF>;
2351 defm VPSUBDZ : avx512_binop_rm<0xFA, "vpsubd", sub, v16i32, VK16WM, VR512,
2352 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2353 SSE_INTALU_ITINS_P, 0>, EVEX_V512, EVEX_CD8<32, CD8VF>;
2355 defm VPMULLDZ : avx512_binop_rm<0x40, "vpmulld", mul, v16i32, VK16WM, VR512,
2356 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2357 SSE_INTALU_ITINS_P, 1>, T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2359 defm VPADDQZ : avx512_binop_rm<0xD4, "vpaddq", add, v8i64, VK8WM, VR512,
2360 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2361 SSE_INTALU_ITINS_P, 1>, EVEX_CD8<64, CD8VF>, EVEX_V512, VEX_W;
2363 defm VPSUBQZ : avx512_binop_rm<0xFB, "vpsubq", sub, v8i64, VK8WM, VR512,
2364 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2365 SSE_INTALU_ITINS_P, 0>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// Widening multiplies: v16i32 sources, v8i64 result, hence the _rm2
// multiclass with distinct Dst/Src value types.
2367 defm VPMULDQZ : avx512_binop_rm2<0x28, "vpmuldq", v8i64, v16i32, VK8WM, VR512,
2368 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2369 SSE_INTALU_ITINS_P, 1>, T8PD, EVEX_V512,
2370 EVEX_CD8<64, CD8VF>, VEX_W;
2372 defm VPMULUDQZ : avx512_binop_rm2<0xF4, "vpmuludq", v8i64, v16i32, VK8WM, VR512,
2373 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2374 SSE_INTMUL_ITINS_P, 1>, EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
// Select the DAG node and the unmasked (all-ones mask, zero passthru)
// intrinsic forms to the plain rr instructions.
2376 def : Pat<(v8i64 (X86pmuludq (v16i32 VR512:$src1), (v16i32 VR512:$src2))),
2377 (VPMULUDQZrr VR512:$src1, VR512:$src2)>;
2379 def : Pat<(v8i64 (int_x86_avx512_mask_pmulu_dq_512 (v16i32 VR512:$src1),
2380 (v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2381 (VPMULUDQZrr VR512:$src1, VR512:$src2)>;
2382 def : Pat<(v8i64 (int_x86_avx512_mask_pmul_dq_512 (v16i32 VR512:$src1),
2383 (v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2384 (VPMULDQZrr VR512:$src1, VR512:$src2)>;
// 512-bit signed/unsigned integer min/max.
// NOTE(review): the D (32-bit element) forms pass IsCommutable = 1 but the
// Q (64-bit element) forms pass 0, even though min/max are commutative at
// either width -- confirm whether this asymmetry is intentional.
2386 defm VPMAXUDZ : avx512_binop_rm<0x3F, "vpmaxud", X86umax, v16i32, VK16WM, VR512,
2387 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2388 SSE_INTALU_ITINS_P, 1>,
2389 T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2390 defm VPMAXUQZ : avx512_binop_rm<0x3F, "vpmaxuq", X86umax, v8i64, VK8WM, VR512,
2391 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2392 SSE_INTALU_ITINS_P, 0>,
2393 T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2395 defm VPMAXSDZ : avx512_binop_rm<0x3D, "vpmaxsd", X86smax, v16i32, VK16WM, VR512,
2396 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2397 SSE_INTALU_ITINS_P, 1>,
2398 T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2399 defm VPMAXSQZ : avx512_binop_rm<0x3D, "vpmaxsq", X86smax, v8i64, VK8WM, VR512,
2400 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2401 SSE_INTALU_ITINS_P, 0>,
2402 T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2404 defm VPMINUDZ : avx512_binop_rm<0x3B, "vpminud", X86umin, v16i32, VK16WM, VR512,
2405 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2406 SSE_INTALU_ITINS_P, 1>,
2407 T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2408 defm VPMINUQZ : avx512_binop_rm<0x3B, "vpminuq", X86umin, v8i64, VK8WM, VR512,
2409 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2410 SSE_INTALU_ITINS_P, 0>,
2411 T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2413 defm VPMINSDZ : avx512_binop_rm<0x39, "vpminsd", X86smin, v16i32, VK16WM, VR512,
2414 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2415 SSE_INTALU_ITINS_P, 1>,
2416 T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2417 defm VPMINSQZ : avx512_binop_rm<0x39, "vpminsq", X86smin, v8i64, VK8WM, VR512,
2418 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2419 SSE_INTALU_ITINS_P, 0>,
2420 T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// Lower the unmasked (all-ones mask, zero passthru) mask intrinsics to the
// plain rr forms. The i64-element intrinsics take the zero vector as a
// v16i32 bitcast to v8i64, and i8 -1 as the all-ones 8-bit mask.
2422 def : Pat <(v16i32 (int_x86_avx512_mask_pmaxs_d_512 (v16i32 VR512:$src1),
2423 (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
2424 (VPMAXSDZrr VR512:$src1, VR512:$src2)>;
2425 def : Pat <(v16i32 (int_x86_avx512_mask_pmaxu_d_512 (v16i32 VR512:$src1),
2426 (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
2427 (VPMAXUDZrr VR512:$src1, VR512:$src2)>;
2428 def : Pat <(v8i64 (int_x86_avx512_mask_pmaxs_q_512 (v8i64 VR512:$src1),
2429 (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2430 (VPMAXSQZrr VR512:$src1, VR512:$src2)>;
2431 def : Pat <(v8i64 (int_x86_avx512_mask_pmaxu_q_512 (v8i64 VR512:$src1),
2432 (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2433 (VPMAXUQZrr VR512:$src1, VR512:$src2)>;
2434 def : Pat <(v16i32 (int_x86_avx512_mask_pmins_d_512 (v16i32 VR512:$src1),
2435 (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
2436 (VPMINSDZrr VR512:$src1, VR512:$src2)>;
2437 def : Pat <(v16i32 (int_x86_avx512_mask_pminu_d_512 (v16i32 VR512:$src1),
2438 (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
2439 (VPMINUDZrr VR512:$src1, VR512:$src2)>;
2440 def : Pat <(v8i64 (int_x86_avx512_mask_pmins_q_512 (v8i64 VR512:$src1),
2441 (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2442 (VPMINSQZrr VR512:$src1, VR512:$src2)>;
2443 def : Pat <(v8i64 (int_x86_avx512_mask_pminu_q_512 (v8i64 VR512:$src1),
2444 (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2445 (VPMINUQZrr VR512:$src1, VR512:$src2)>;
2446 //===----------------------------------------------------------------------===//
2447 // AVX-512 - Unpack Instructions
2448 //===----------------------------------------------------------------------===//
// FP unpack (vunpckhps/pd, vunpcklps/pd): reg-reg and reg-mem forms only,
// no masked variants. The memory operand is loaded via mem_frag and run
// through bitconvert before feeding OpNode.
2450 multiclass avx512_unpack_fp<bits<8> opc, SDNode OpNode, ValueType vt,
2451 PatFrag mem_frag, RegisterClass RC,
2452 X86MemOperand x86memop, string asm,
2454 def rr : AVX512PI<opc, MRMSrcReg,
2455 (outs RC:$dst), (ins RC:$src1, RC:$src2),
2457 (vt (OpNode RC:$src1, RC:$src2)))],
2459 def rm : AVX512PI<opc, MRMSrcMem,
2460 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2462 (vt (OpNode RC:$src1,
2463 (bitconvert (mem_frag addr:$src2)))))],
// NOTE(review): the single-precision defms below (v16f32) pass memopv8f64 as
// the load fragment; the bitconvert in the rm pattern makes this legal, but
// confirm memopv16f32 was not intended.
2467 defm VUNPCKHPSZ: avx512_unpack_fp<0x15, X86Unpckh, v16f32, memopv8f64,
2468 VR512, f512mem, "vunpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2469 SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
2470 defm VUNPCKHPDZ: avx512_unpack_fp<0x15, X86Unpckh, v8f64, memopv8f64,
2471 VR512, f512mem, "vunpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2472 SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2473 defm VUNPCKLPSZ: avx512_unpack_fp<0x14, X86Unpckl, v16f32, memopv8f64,
2474 VR512, f512mem, "vunpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2475 SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
2476 defm VUNPCKLPDZ: avx512_unpack_fp<0x14, X86Unpckl, v8f64, memopv8f64,
2477 VR512, f512mem, "vunpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2478 SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// Integer unpack (vpunpckldq/lqdq/hdq/hqdq): reg-reg and reg-mem forms,
// no masked variants. Memory source is bitconverted after the load.
2480 multiclass avx512_unpack_int<bits<8> opc, string OpcodeStr, SDNode OpNode,
2481 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
2482 X86MemOperand x86memop> {
2483 def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2484 (ins RC:$src1, RC:$src2),
2485 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2486 [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
2487 IIC_SSE_UNPCK>, EVEX_4V;
2488 def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2489 (ins RC:$src1, x86memop:$src2),
2490 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2491 [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1),
2492 (bitconvert (memop_frag addr:$src2)))))],
2493 IIC_SSE_UNPCK>, EVEX_4V;
// 512-bit instantiations: 32-bit elements (v16i32) and 64-bit elements
// (v8i64, VEX_W) for both low and high unpacks.
2495 defm VPUNPCKLDQZ : avx512_unpack_int<0x62, "vpunpckldq", X86Unpckl, v16i32,
2496 VR512, memopv16i32, i512mem>, EVEX_V512,
2497 EVEX_CD8<32, CD8VF>;
2498 defm VPUNPCKLQDQZ : avx512_unpack_int<0x6C, "vpunpcklqdq", X86Unpckl, v8i64,
2499 VR512, memopv8i64, i512mem>, EVEX_V512,
2500 VEX_W, EVEX_CD8<64, CD8VF>;
2501 defm VPUNPCKHDQZ : avx512_unpack_int<0x6A, "vpunpckhdq", X86Unpckh, v16i32,
2502 VR512, memopv16i32, i512mem>, EVEX_V512,
2503 EVEX_CD8<32, CD8VF>;
2504 defm VPUNPCKHQDQZ : avx512_unpack_int<0x6D, "vpunpckhqdq", X86Unpckh, v8i64,
2505 VR512, memopv8i64, i512mem>, EVEX_V512,
2506 VEX_W, EVEX_CD8<64, CD8VF>;
2507 //===----------------------------------------------------------------------===//
// Shuffle-with-immediate (vpshufd / vpermilps / vpermilpd): one reg form (ri)
// and one mem form (mi), each taking an 8-bit immediate control.
2511 multiclass avx512_pshuf_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
2512 SDNode OpNode, PatFrag mem_frag,
2513 X86MemOperand x86memop, ValueType OpVT> {
2514 def ri : AVX512Ii8<opc, MRMSrcReg, (outs RC:$dst),
2515 (ins RC:$src1, i8imm:$src2),
2516 !strconcat(OpcodeStr,
2517 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2519 (OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
2521 def mi : AVX512Ii8<opc, MRMSrcMem, (outs RC:$dst),
2522 (ins x86memop:$src1, i8imm:$src2),
2523 !strconcat(OpcodeStr,
2524 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2526 (OpVT (OpNode (mem_frag addr:$src1),
2527 (i8 imm:$src2))))]>, EVEX;
2530 defm VPSHUFDZ : avx512_pshuf_imm<0x70, "vpshufd", VR512, X86PShufd, memopv16i32,
2531 i512mem, v16i32>, PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
2533 let ExeDomain = SSEPackedSingle in
2534 defm VPERMILPSZ : avx512_pshuf_imm<0x04, "vpermilps", VR512, X86VPermilp,
2535 memopv16f32, i512mem, v16f32>, TAPD, EVEX_V512,
2536 EVEX_CD8<32, CD8VF>;
// NOTE(review): vpermilpd below uses EVEX_CD8<32, CD8VF> despite its 64-bit
// elements (VEX_W, v8f64); other 64-bit-element ops in this file use
// EVEX_CD8<64, ...> -- confirm the compressed-displacement scale.
2537 let ExeDomain = SSEPackedDouble in
2538 defm VPERMILPDZ : avx512_pshuf_imm<0x05, "vpermilpd", VR512, X86VPermilp,
2539 memopv8f64, i512mem, v8f64>, TAPD, EVEX_V512,
2540 VEX_W, EVEX_CD8<32, CD8VF>;
// Map the integer-typed permil nodes onto the FP instructions.
2542 def : Pat<(v16i32 (X86VPermilp VR512:$src1, (i8 imm:$imm))),
2543 (VPERMILPSZri VR512:$src1, imm:$imm)>;
2544 def : Pat<(v8i64 (X86VPermilp VR512:$src1, (i8 imm:$imm))),
2545 (VPERMILPDZri VR512:$src1, imm:$imm)>;
2547 //===----------------------------------------------------------------------===//
2548 // AVX-512 Logical Instructions
2549 //===----------------------------------------------------------------------===//
// 512-bit bitwise logic. Each operation exists in a D (32-bit element,
// VK16WM, {1to16}) and a Q (64-bit element, VK8WM, {1to8}, VEX_W) flavor;
// the element size only matters for masking/broadcast, the bit pattern
// computed is identical. and/or/xor are commutable (1); andn is not (0).
2551 defm VPANDDZ : avx512_binop_rm<0xDB, "vpandd", and, v16i32, VK16WM, VR512, memopv16i32,
2552 i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
2553 EVEX_V512, EVEX_CD8<32, CD8VF>;
2554 defm VPANDQZ : avx512_binop_rm<0xDB, "vpandq", and, v8i64, VK8WM, VR512, memopv8i64,
2555 i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
2556 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2557 defm VPORDZ : avx512_binop_rm<0xEB, "vpord", or, v16i32, VK16WM, VR512, memopv16i32,
2558 i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
2559 EVEX_V512, EVEX_CD8<32, CD8VF>;
2560 defm VPORQZ : avx512_binop_rm<0xEB, "vporq", or, v8i64, VK8WM, VR512, memopv8i64,
2561 i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
2562 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2563 defm VPXORDZ : avx512_binop_rm<0xEF, "vpxord", xor, v16i32, VK16WM, VR512, memopv16i32,
2564 i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
2565 EVEX_V512, EVEX_CD8<32, CD8VF>;
2566 defm VPXORQZ : avx512_binop_rm<0xEF, "vpxorq", xor, v8i64, VK8WM, VR512, memopv8i64,
2567 i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
2568 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2569 defm VPANDNDZ : avx512_binop_rm<0xDF, "vpandnd", X86andnp, v16i32, VK16WM, VR512,
2570 memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
2571 SSE_BIT_ITINS_P, 0>, EVEX_V512, EVEX_CD8<32, CD8VF>;
2572 defm VPANDNQZ : avx512_binop_rm<0xDF, "vpandnq", X86andnp, v8i64, VK8WM, VR512,
2573 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2574 SSE_BIT_ITINS_P, 0>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2576 //===----------------------------------------------------------------------===//
2577 // AVX-512 FP arithmetic
2578 //===----------------------------------------------------------------------===//
// Scalar FP binary ops: reuses the SSE scalar multiclass with the AVX-512
// FR32X/FR64X register classes and EVEX prefixes. "ss"/"sd" mnemonic
// suffixes are appended here; VEX_LIG marks the vector-length bit as ignored.
2580 multiclass avx512_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
2582 defm SSZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"), OpNode, FR32X,
2583 f32mem, itins.s, 0>, XS, EVEX_4V, VEX_LIG,
2584 EVEX_CD8<32, CD8VT1>;
2585 defm SDZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"), OpNode, FR64X,
2586 f64mem, itins.d, 0>, XD, VEX_W, EVEX_4V, VEX_LIG,
2587 EVEX_CD8<64, CD8VT1>;
// add/mul/min/max are commutable; sub/div are not.
2590 let isCommutable = 1 in {
2591 defm VADD : avx512_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>;
2592 defm VMUL : avx512_binop_s<0x59, "mul", fmul, SSE_ALU_ITINS_S>;
2593 defm VMIN : avx512_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>;
2594 defm VMAX : avx512_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>;
2596 let isCommutable = 0 in {
2597 defm VSUB : avx512_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>;
2598 defm VDIV : avx512_binop_s<0x5E, "div", fdiv, SSE_ALU_ITINS_S>;
// Packed FP binary op: rr/rm/rmb with merge-masked (..k) and zero-masked
// (..kz) companions. Only the unmasked rr/rm/rmb defs carry ISel patterns;
// the masked variants have empty [] pattern lists.
2601 multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
2603 RegisterClass RC, ValueType vt,
2604 X86MemOperand x86memop, PatFrag mem_frag,
2605 X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
2607 Domain d, OpndItins itins, bit commutable> {
2608 let isCommutable = commutable in {
2609 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2610 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2611 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>,
2614 def rrk: PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src1, RC:$src2),
2615 !strconcat(OpcodeStr,
2616 " \t{$src2, $src1, $dst {${mask}} |$dst {${mask}}, $src1, $src2}"),
2617 [], itins.rr, d>, EVEX_4V, EVEX_K;
2619 def rrkz: PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src1, RC:$src2),
2620 !strconcat(OpcodeStr,
2621 " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
2622 [], itins.rr, d>, EVEX_4V, EVEX_KZ;
2625 let mayLoad = 1 in {
2626 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2627 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2628 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
2629 itins.rm, d>, EVEX_4V;
2631 def rmb : PI<opc, MRMSrcMem, (outs RC:$dst),
2632 (ins RC:$src1, x86scalar_mop:$src2),
2633 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2634 ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
2635 [(set RC:$dst, (OpNode RC:$src1,
2636 (vt (X86VBroadcast (scalar_mfrag addr:$src2)))))],
2637 itins.rm, d>, EVEX_4V, EVEX_B;
// NOTE(review): the rmk/rmkz asm strings below start with "\t" with no
// leading space, unlike every " \t" string above -- confirm whether the
// missing space is intentional.
2639 def rmk : PI<opc, MRMSrcMem, (outs RC:$dst),
2640 (ins KRC:$mask, RC:$src1, x86memop:$src2), !strconcat(OpcodeStr,
2641 "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2642 [], itins.rm, d>, EVEX_4V, EVEX_K;
2644 def rmkz : PI<opc, MRMSrcMem, (outs RC:$dst),
2645 (ins KRC:$mask, RC:$src1, x86memop:$src2), !strconcat(OpcodeStr,
2646 "\t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
2647 [], itins.rm, d>, EVEX_4V, EVEX_KZ;
2649 def rmbk : PI<opc, MRMSrcMem, (outs RC:$dst),
2650 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2), !strconcat(OpcodeStr,
2651 " \t{${src2}", BrdcstStr,
2652 ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}", BrdcstStr, "}"),
2653 [], itins.rm, d>, EVEX_4V, EVEX_B, EVEX_K;
2655 def rmbkz : PI<opc, MRMSrcMem, (outs RC:$dst),
2656 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2), !strconcat(OpcodeStr,
2657 " \t{${src2}", BrdcstStr,
2658 ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
2660 [], itins.rm, d>, EVEX_4V, EVEX_B, EVEX_KZ;
// 512-bit packed FP arithmetic instantiations: PS forms use v16f32 with
// VK16WM and {1to16} broadcast; PD forms use v8f64 with VK8WM, {1to8}, and
// VEX_W. add/mul/min/max are commutable; sub/div are not.
2664 defm VADDPSZ : avx512_fp_packed<0x58, "addps", fadd, VK16WM, VR512, v16f32, f512mem,
2665 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2666 SSE_ALU_ITINS_P.s, 1>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2668 defm VADDPDZ : avx512_fp_packed<0x58, "addpd", fadd, VK8WM, VR512, v8f64, f512mem,
2669 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2670 SSE_ALU_ITINS_P.d, 1>,
2671 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2673 defm VMULPSZ : avx512_fp_packed<0x59, "mulps", fmul, VK16WM, VR512, v16f32, f512mem,
2674 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2675 SSE_ALU_ITINS_P.s, 1>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2676 defm VMULPDZ : avx512_fp_packed<0x59, "mulpd", fmul, VK8WM, VR512, v8f64, f512mem,
2677 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2678 SSE_ALU_ITINS_P.d, 1>,
2679 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2681 defm VMINPSZ : avx512_fp_packed<0x5D, "minps", X86fmin, VK16WM, VR512, v16f32, f512mem,
2682 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2683 SSE_ALU_ITINS_P.s, 1>,
2684 EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2685 defm VMAXPSZ : avx512_fp_packed<0x5F, "maxps", X86fmax, VK16WM, VR512, v16f32, f512mem,
2686 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2687 SSE_ALU_ITINS_P.s, 1>,
2688 EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2690 defm VMINPDZ : avx512_fp_packed<0x5D, "minpd", X86fmin, VK8WM, VR512, v8f64, f512mem,
2691 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2692 SSE_ALU_ITINS_P.d, 1>,
2693 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2694 defm VMAXPDZ : avx512_fp_packed<0x5F, "maxpd", X86fmax, VK8WM, VR512, v8f64, f512mem,
2695 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2696 SSE_ALU_ITINS_P.d, 1>,
2697 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2699 defm VSUBPSZ : avx512_fp_packed<0x5C, "subps", fsub, VK16WM, VR512, v16f32, f512mem,
2700 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2701 SSE_ALU_ITINS_P.s, 0>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2702 defm VDIVPSZ : avx512_fp_packed<0x5E, "divps", fdiv, VK16WM, VR512, v16f32, f512mem,
2703 memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
2704 SSE_ALU_ITINS_P.s, 0>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
2706 defm VSUBPDZ : avx512_fp_packed<0x5C, "subpd", fsub, VK8WM, VR512, v8f64, f512mem,
2707 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2708 SSE_ALU_ITINS_P.d, 0>,
2709 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2710 defm VDIVPDZ : avx512_fp_packed<0x5E, "divpd", fdiv, VK8WM, VR512, v8f64, f512mem,
2711 memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
2712 SSE_ALU_ITINS_P.d, 0>,
2713 EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
// Lower unmasked max/min intrinsic calls (all-ones mask, zero passthru,
// FROUND_CURRENT rounding) to the plain rr instructions.
2715 def : Pat<(v16f32 (int_x86_avx512_mask_max_ps_512 (v16f32 VR512:$src1),
2716 (v16f32 VR512:$src2), (bc_v16f32 (v16i32 immAllZerosV)),
2717 (i16 -1), FROUND_CURRENT)),
2718 (VMAXPSZrr VR512:$src1, VR512:$src2)>;
2720 def : Pat<(v8f64 (int_x86_avx512_mask_max_pd_512 (v8f64 VR512:$src1),
2721 (v8f64 VR512:$src2), (bc_v8f64 (v16i32 immAllZerosV)),
2722 (i8 -1), FROUND_CURRENT)),
2723 (VMAXPDZrr VR512:$src1, VR512:$src2)>;
2725 def : Pat<(v16f32 (int_x86_avx512_mask_min_ps_512 (v16f32 VR512:$src1),
2726 (v16f32 VR512:$src2), (bc_v16f32 (v16i32 immAllZerosV)),
2727 (i16 -1), FROUND_CURRENT)),
2728 (VMINPSZrr VR512:$src1, VR512:$src2)>;
2730 def : Pat<(v8f64 (int_x86_avx512_mask_min_pd_512 (v8f64 VR512:$src1),
2731 (v8f64 VR512:$src2), (bc_v8f64 (v16i32 immAllZerosV)),
2732 (i8 -1), FROUND_CURRENT)),
2733 (VMINPDZrr VR512:$src1, VR512:$src2)>;
2734 //===----------------------------------------------------------------------===//
2735 // AVX-512 VPTESTM instructions
2736 //===----------------------------------------------------------------------===//
// VPTESTM/VPTESTNM: AND (or AND-NOT) two vectors and set a mask-register
// bit per element. Destination is a mask class (KRC), not a vector.
2738 multiclass avx512_vptest<bits<8> opc, string OpcodeStr, RegisterClass KRC,
2739 RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
2740 SDNode OpNode, ValueType vt> {
2741 def rr : AVX512PI<opc, MRMSrcReg,
2742 (outs KRC:$dst), (ins RC:$src1, RC:$src2),
2743 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2744 [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2)))],
2745 SSEPackedInt>, EVEX_4V;
2746 def rm : AVX512PI<opc, MRMSrcMem,
2747 (outs KRC:$dst), (ins RC:$src1, x86memop:$src2),
2748 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2749 [(set KRC:$dst, (OpNode (vt RC:$src1),
2750 (bitconvert (memop_frag addr:$src2))))], SSEPackedInt>, EVEX_4V;
// vptestm (T8PD) is baseline AVX-512F; vptestnm (T8XS) requires CDI.
2753 defm VPTESTMDZ : avx512_vptest<0x27, "vptestmd", VK16, VR512, f512mem,
2754 memopv16i32, X86testm, v16i32>, T8PD, EVEX_V512,
2755 EVEX_CD8<32, CD8VF>;
2756 defm VPTESTMQZ : avx512_vptest<0x27, "vptestmq", VK8, VR512, f512mem,
2757 memopv8i64, X86testm, v8i64>, T8PD, EVEX_V512, VEX_W,
2758 EVEX_CD8<64, CD8VF>;
2760 let Predicates = [HasCDI] in {
2761 defm VPTESTNMDZ : avx512_vptest<0x27, "vptestnmd", VK16, VR512, f512mem,
2762 memopv16i32, X86testnm, v16i32>, T8XS, EVEX_V512,
2763 EVEX_CD8<32, CD8VF>;
2764 defm VPTESTNMQZ : avx512_vptest<0x27, "vptestnmq", VK8, VR512, f512mem,
2765 memopv8i64, X86testnm, v8i64>, T8XS, EVEX_V512, VEX_W,
2766 EVEX_CD8<64, CD8VF>;
// Unmasked intrinsic forms: the mask-register result is copied to a GPR
// class to match the intrinsic's scalar integer return type.
2769 def : Pat <(i16 (int_x86_avx512_mask_ptestm_d_512 (v16i32 VR512:$src1),
2770 (v16i32 VR512:$src2), (i16 -1))),
2771 (COPY_TO_REGCLASS (VPTESTMDZrr VR512:$src1, VR512:$src2), GR16)>;
2773 def : Pat <(i8 (int_x86_avx512_mask_ptestm_q_512 (v8i64 VR512:$src1),
2774 (v8i64 VR512:$src2), (i8 -1))),
2775 (COPY_TO_REGCLASS (VPTESTMQZrr VR512:$src1, VR512:$src2), GR8)>;
2776 //===----------------------------------------------------------------------===//
2777 // AVX-512 Shift instructions
2778 //===----------------------------------------------------------------------===//
// Shift by 8-bit immediate: ri (reg) and mi (mem) forms, each with a
// merge-masked companion (rik/mik) that carries an empty pattern list.
2779 multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
2780 string OpcodeStr, SDNode OpNode, RegisterClass RC,
2781 ValueType vt, X86MemOperand x86memop, PatFrag mem_frag,
2782 RegisterClass KRC> {
2783 def ri : AVX512BIi8<opc, ImmFormR, (outs RC:$dst),
2784 (ins RC:$src1, i8imm:$src2),
2785 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2786 [(set RC:$dst, (vt (OpNode RC:$src1, (i8 imm:$src2))))],
2787 SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V;
2788 def rik : AVX512BIi8<opc, ImmFormR, (outs RC:$dst),
2789 (ins KRC:$mask, RC:$src1, i8imm:$src2),
2790 !strconcat(OpcodeStr,
2791 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2792 [], SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V, EVEX_K;
2793 def mi: AVX512BIi8<opc, ImmFormM, (outs RC:$dst),
2794 (ins x86memop:$src1, i8imm:$src2),
2795 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2796 [(set RC:$dst, (OpNode (mem_frag addr:$src1),
2797 (i8 imm:$src2)))], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V;
2798 def mik: AVX512BIi8<opc, ImmFormM, (outs RC:$dst),
2799 (ins KRC:$mask, x86memop:$src1, i8imm:$src2),
2800 !strconcat(OpcodeStr,
2801 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2802 [], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V, EVEX_K;
// Shift by a count held in an XMM register (or loaded from a 128-bit
// memory operand): rr/rm with merge-masked rrk/rmk companions.
2805 multiclass avx512_shift_rrm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2806 RegisterClass RC, ValueType vt, ValueType SrcVT,
2807 PatFrag bc_frag, RegisterClass KRC> {
2808 // src2 is always 128-bit
2809 def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2810 (ins RC:$src1, VR128X:$src2),
2811 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2812 [(set RC:$dst, (vt (OpNode RC:$src1, (SrcVT VR128X:$src2))))],
2813 SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V;
2814 def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2815 (ins KRC:$mask, RC:$src1, VR128X:$src2),
2816 !strconcat(OpcodeStr,
2817 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2818 [], SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V, EVEX_K;
2819 def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2820 (ins RC:$src1, i128mem:$src2),
2821 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2822 [(set RC:$dst, (vt (OpNode RC:$src1,
2823 (bc_frag (memopv2i64 addr:$src2)))))],
2824 SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V;
2825 def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2826 (ins KRC:$mask, RC:$src1, i128mem:$src2),
2827 !strconcat(OpcodeStr,
2828 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2829 [], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V, EVEX_K;
// Shift instantiations. Each mnemonic gets two defms with the same prefix:
// the immediate-count forms (_rmi: ri/rik/mi/mik) and the xmm-count forms
// (_rrm: rr/rrk/rm/rmk); the generated record suffixes do not collide.
// The xmm-count forms use EVEX_CD8<.., CD8VQ> since src2 is 128-bit.
2832 defm VPSRLDZ : avx512_shift_rmi<0x72, MRM2r, MRM2m, "vpsrld", X86vsrli,
2833 VR512, v16i32, i512mem, memopv16i32, VK16WM>,
2834 EVEX_V512, EVEX_CD8<32, CD8VF>;
2835 defm VPSRLDZ : avx512_shift_rrm<0xD2, "vpsrld", X86vsrl,
2836 VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
2837 EVEX_CD8<32, CD8VQ>;
2839 defm VPSRLQZ : avx512_shift_rmi<0x73, MRM2r, MRM2m, "vpsrlq", X86vsrli,
2840 VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
2841 EVEX_CD8<64, CD8VF>, VEX_W;
2842 defm VPSRLQZ : avx512_shift_rrm<0xD3, "vpsrlq", X86vsrl,
2843 VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
2844 EVEX_CD8<64, CD8VQ>, VEX_W;
2846 defm VPSLLDZ : avx512_shift_rmi<0x72, MRM6r, MRM6m, "vpslld", X86vshli,
2847 VR512, v16i32, i512mem, memopv16i32, VK16WM>, EVEX_V512,
2848 EVEX_CD8<32, CD8VF>;
2849 defm VPSLLDZ : avx512_shift_rrm<0xF2, "vpslld", X86vshl,
2850 VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
2851 EVEX_CD8<32, CD8VQ>;
2853 defm VPSLLQZ : avx512_shift_rmi<0x73, MRM6r, MRM6m, "vpsllq", X86vshli,
2854 VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
2855 EVEX_CD8<64, CD8VF>, VEX_W;
2856 defm VPSLLQZ : avx512_shift_rrm<0xF3, "vpsllq", X86vshl,
2857 VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
2858 EVEX_CD8<64, CD8VQ>, VEX_W;
2860 defm VPSRADZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsrad", X86vsrai,
2861 VR512, v16i32, i512mem, memopv16i32, VK16WM>,
2862 EVEX_V512, EVEX_CD8<32, CD8VF>;
2863 defm VPSRADZ : avx512_shift_rrm<0xE2, "vpsrad", X86vsra,
2864 VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
2865 EVEX_CD8<32, CD8VQ>;
// vpsraq is new in AVX-512 (no SSE/AVX predecessor); same encodings as
// vpsrad but with VEX_W for the 64-bit element size.
2867 defm VPSRAQZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsraq", X86vsrai,
2868 VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
2869 EVEX_CD8<64, CD8VF>, VEX_W;
2870 defm VPSRAQZ : avx512_shift_rrm<0xE2, "vpsraq", X86vsra,
2871 VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
2872 EVEX_CD8<64, CD8VQ>, VEX_W;
2874 //===-------------------------------------------------------------------===//
2875 // Variable Bit Shifts
2876 //===-------------------------------------------------------------------===//
// Variable per-element shifts (vpsllv/vpsrlv/vpsrav): the count vector has
// the same type as the data vector. Plain rr/rm forms only; the patterns
// map the generic shl/srl/sra nodes directly.
2877 multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
2878 RegisterClass RC, ValueType vt,
2879 X86MemOperand x86memop, PatFrag mem_frag> {
2880 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
2881 (ins RC:$src1, RC:$src2),
2882 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2884 (vt (OpNode RC:$src1, (vt RC:$src2))))]>,
2886 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
2887 (ins RC:$src1, x86memop:$src2),
2888 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2890 (vt (OpNode RC:$src1, (mem_frag addr:$src2))))]>,
2894 defm VPSLLVDZ : avx512_var_shift<0x47, "vpsllvd", shl, VR512, v16i32,
2895 i512mem, memopv16i32>, EVEX_V512,
2896 EVEX_CD8<32, CD8VF>;
2897 defm VPSLLVQZ : avx512_var_shift<0x47, "vpsllvq", shl, VR512, v8i64,
2898 i512mem, memopv8i64>, EVEX_V512, VEX_W,
2899 EVEX_CD8<64, CD8VF>;
2900 defm VPSRLVDZ : avx512_var_shift<0x45, "vpsrlvd", srl, VR512, v16i32,
2901 i512mem, memopv16i32>, EVEX_V512,
2902 EVEX_CD8<32, CD8VF>;
2903 defm VPSRLVQZ : avx512_var_shift<0x45, "vpsrlvq", srl, VR512, v8i64,
2904 i512mem, memopv8i64>, EVEX_V512, VEX_W,
2905 EVEX_CD8<64, CD8VF>;
2906 defm VPSRAVDZ : avx512_var_shift<0x46, "vpsravd", sra, VR512, v16i32,
2907 i512mem, memopv16i32>, EVEX_V512,
2908 EVEX_CD8<32, CD8VF>;
2909 defm VPSRAVQZ : avx512_var_shift<0x46, "vpsravq", sra, VR512, v8i64,
2910 i512mem, memopv8i64>, EVEX_V512, VEX_W,
2911 EVEX_CD8<64, CD8VF>;
2913 //===----------------------------------------------------------------------===//
2914 // AVX-512 - MOVDDUP
2915 //===----------------------------------------------------------------------===//
// VMOVDDUP: duplicate even-indexed double elements (X86Movddup node).
2917 multiclass avx512_movddup<string OpcodeStr, RegisterClass RC, ValueType VT,
2918 X86MemOperand x86memop, PatFrag memop_frag> {
2919 def rr : AVX512PDI<0x12, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
2920 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
2921 [(set RC:$dst, (VT (X86Movddup RC:$src)))]>, EVEX;
2922 def rm : AVX512PDI<0x12, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
2923 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
2925 (VT (X86Movddup (memop_frag addr:$src))))]>, EVEX;
2928 defm VMOVDDUPZ : avx512_movddup<"vmovddup", VR512, v8f64, f512mem, memopv8f64>,
2929 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
// Fold a movddup of a scalar load widened to a vector into the rm form.
2930 def : Pat<(X86Movddup (v8f64 (scalar_to_vector (loadf64 addr:$src)))),
2931 (VMOVDDUPZrm addr:$src)>;
2933 //===---------------------------------------------------------------------===//
2934 // Replicate Single FP - MOVSHDUP and MOVSLDUP
2935 //===---------------------------------------------------------------------===//
// MOVSHDUP/MOVSLDUP: replicate odd- (shdup) or even- (sldup) indexed
// single-precision elements; reg and mem source forms.
2936 multiclass avx512_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
2937 ValueType vt, RegisterClass RC, PatFrag mem_frag,
2938 X86MemOperand x86memop> {
2939 def rr : AVX512XSI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
2940 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
2941 [(set RC:$dst, (vt (OpNode RC:$src)))]>, EVEX;
2943 def rm : AVX512XSI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
2944 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
2945 [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>, EVEX;
2948 defm VMOVSHDUPZ : avx512_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
2949 v16f32, VR512, memopv16f32, f512mem>, EVEX_V512,
2950 EVEX_CD8<32, CD8VF>;
2951 defm VMOVSLDUPZ : avx512_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
2952 v16f32, VR512, memopv16f32, f512mem>, EVEX_V512,
2953 EVEX_CD8<32, CD8VF>;
// Integer-typed shuffle nodes are lowered onto the FP instructions as well.
2955 def : Pat<(v16i32 (X86Movshdup VR512:$src)), (VMOVSHDUPZrr VR512:$src)>;
2956 def : Pat<(v16i32 (X86Movshdup (memopv16i32 addr:$src))),
2957 (VMOVSHDUPZrm addr:$src)>;
2958 def : Pat<(v16i32 (X86Movsldup VR512:$src)), (VMOVSLDUPZrr VR512:$src)>;
2959 def : Pat<(v16i32 (X86Movsldup (memopv16i32 addr:$src))),
2960 (VMOVSLDUPZrm addr:$src)>;
2962 //===----------------------------------------------------------------------===//
2963 // Move Low to High and High to Low packed FP Instructions
2964 //===----------------------------------------------------------------------===//
// VMOVLHPS/VMOVHLPS, 128-bit EVEX forms (register-register only).
2965 def VMOVLHPSZrr : AVX512PSI<0x16, MRMSrcReg, (outs VR128X:$dst),
2966 (ins VR128X:$src1, VR128X:$src2),
2967 "vmovlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2968 [(set VR128X:$dst, (v4f32 (X86Movlhps VR128X:$src1, VR128X:$src2)))],
2969 IIC_SSE_MOV_LH>, EVEX_4V;
2970 def VMOVHLPSZrr : AVX512PSI<0x12, MRMSrcReg, (outs VR128X:$dst),
2971 (ins VR128X:$src1, VR128X:$src2),
2972 "vmovhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2973 [(set VR128X:$dst, (v4f32 (X86Movhlps VR128X:$src1, VR128X:$src2)))],
2974 IIC_SSE_MOV_LH>, EVEX_4V;
// Integer-typed shuffles map onto the same FP instructions.
// NOTE(review): the closing brace of this `let` block is outside the
// visible excerpt.
2976 let Predicates = [HasAVX512] in {
2978 def : Pat<(v4i32 (X86Movlhps VR128X:$src1, VR128X:$src2)),
2979 (VMOVLHPSZrr VR128X:$src1, VR128X:$src2)>;
2980 def : Pat<(v2i64 (X86Movlhps VR128X:$src1, VR128X:$src2)),
2981 (VMOVLHPSZrr (v2i64 VR128X:$src1), VR128X:$src2)>;
2984 def : Pat<(v4i32 (X86Movhlps VR128X:$src1, VR128X:$src2)),
2985 (VMOVHLPSZrr VR128X:$src1, VR128X:$src2)>;
2988 //===----------------------------------------------------------------------===//
2989 // FMA - Fused Multiply Operations
// Packed FMA, 213 form.  $src1 is tied to $dst (see the surrounding
// Constraints let).  Three variants:
//   r  - register/register, generated through AVX512_masking_3src so it also
//        gets the masking/zero-masking flavors;
//   m  - full-vector memory operand in $src3;
//   mb - broadcast memory operand (EVEX_B) using the scalar mem frag.
2991 let Constraints = "$src1 = $dst" in {
2992 multiclass avx512_fma3p_rm<bits<8> opc, string OpcodeStr,
2993 RegisterClass RC, X86MemOperand x86memop,
2994 PatFrag mem_frag, X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
2995 string BrdcstStr, SDNode OpNode, ValueType OpVT,
2996 RegisterClass KRC> {
2997 defm r: AVX512_masking_3src<opc, MRMSrcReg, (outs RC:$dst),
2998 (ins RC:$src2, RC:$src3),
2999 OpcodeStr, "$src3, $src2", "$src2, $src3",
3000 (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)), OpVT, RC, KRC>,
3004 def m: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
3005 (ins RC:$src1, RC:$src2, x86memop:$src3),
3006 !strconcat(OpcodeStr, " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3007 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2,
3008 (mem_frag addr:$src3))))]>;
3009 def mb: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
3010 (ins RC:$src1, RC:$src2, x86scalar_mop:$src3),
3011 !strconcat(OpcodeStr, " \t{${src3}", BrdcstStr,
3012 ", $src2, $dst|$dst, $src2, ${src3}", BrdcstStr, "}"),
3013 [(set RC:$dst, (OpNode RC:$src1, RC:$src2,
3014 (OpVT (X86VBroadcast (scalar_mfrag addr:$src3)))))]>, EVEX_B;
3016 } // Constraints = "$src1 = $dst"
// 512-bit packed-single 213-form FMA family: add/sub/addsub/subadd and the
// negated variants, all with {1to16} broadcast support and VK16WM write mask.
3018 let ExeDomain = SSEPackedSingle in {
3019 defm VFMADD213PSZ : avx512_fma3p_rm<0xA8, "vfmadd213ps", VR512, f512mem,
3020 memopv16f32, f32mem, loadf32, "{1to16}",
3021 X86Fmadd, v16f32, VK16WM>, EVEX_V512,
3022 EVEX_CD8<32, CD8VF>;
3023 defm VFMSUB213PSZ : avx512_fma3p_rm<0xAA, "vfmsub213ps", VR512, f512mem,
3024 memopv16f32, f32mem, loadf32, "{1to16}",
3025 X86Fmsub, v16f32, VK16WM>, EVEX_V512,
3026 EVEX_CD8<32, CD8VF>;
3027 defm VFMADDSUB213PSZ : avx512_fma3p_rm<0xA6, "vfmaddsub213ps", VR512, f512mem,
3028 memopv16f32, f32mem, loadf32, "{1to16}",
3029 X86Fmaddsub, v16f32, VK16WM>,
3030 EVEX_V512, EVEX_CD8<32, CD8VF>;
3031 defm VFMSUBADD213PSZ : avx512_fma3p_rm<0xA7, "vfmsubadd213ps", VR512, f512mem,
3032 memopv16f32, f32mem, loadf32, "{1to16}",
3033 X86Fmsubadd, v16f32, VK16WM>,
3034 EVEX_V512, EVEX_CD8<32, CD8VF>;
3035 defm VFNMADD213PSZ : avx512_fma3p_rm<0xAC, "vfnmadd213ps", VR512, f512mem,
3036 memopv16f32, f32mem, loadf32, "{1to16}",
3037 X86Fnmadd, v16f32, VK16WM>, EVEX_V512,
3038 EVEX_CD8<32, CD8VF>;
3039 defm VFNMSUB213PSZ : avx512_fma3p_rm<0xAE, "vfnmsub213ps", VR512, f512mem,
3040 memopv16f32, f32mem, loadf32, "{1to16}",
3041 X86Fnmsub, v16f32, VK16WM>, EVEX_V512,
3042 EVEX_CD8<32, CD8VF>;
// 512-bit packed-double 213-form FMA family; same opcodes as the PS variants
// with VEX_W set, {1to8} broadcast and VK8WM write mask.
3044 let ExeDomain = SSEPackedDouble in {
3045 defm VFMADD213PDZ : avx512_fma3p_rm<0xA8, "vfmadd213pd", VR512, f512mem,
3046 memopv8f64, f64mem, loadf64, "{1to8}",
3047 X86Fmadd, v8f64, VK8WM>, EVEX_V512,
3048 VEX_W, EVEX_CD8<64, CD8VF>;
3049 defm VFMSUB213PDZ : avx512_fma3p_rm<0xAA, "vfmsub213pd", VR512, f512mem,
3050 memopv8f64, f64mem, loadf64, "{1to8}",
3051 X86Fmsub, v8f64, VK8WM>, EVEX_V512, VEX_W,
3052 EVEX_CD8<64, CD8VF>;
3053 defm VFMADDSUB213PDZ : avx512_fma3p_rm<0xA6, "vfmaddsub213pd", VR512, f512mem,
3054 memopv8f64, f64mem, loadf64, "{1to8}",
3055 X86Fmaddsub, v8f64, VK8WM>,
3056 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
3057 defm VFMSUBADD213PDZ : avx512_fma3p_rm<0xA7, "vfmsubadd213pd", VR512, f512mem,
3058 memopv8f64, f64mem, loadf64, "{1to8}",
3059 X86Fmsubadd, v8f64, VK8WM>,
3060 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
3061 defm VFNMADD213PDZ : avx512_fma3p_rm<0xAC, "vfnmadd213pd", VR512, f512mem,
3062 memopv8f64, f64mem, loadf64, "{1to8}",
3063 X86Fnmadd, v8f64, VK8WM>, EVEX_V512, VEX_W,
3064 EVEX_CD8<64, CD8VF>;
3065 defm VFNMSUB213PDZ : avx512_fma3p_rm<0xAE, "vfnmsub213pd", VR512, f512mem,
3066 memopv8f64, f64mem, loadf64, "{1to8}",
3067 X86Fnmsub, v8f64, VK8WM>, EVEX_V512, VEX_W,
3068 EVEX_CD8<64, CD8VF>;
// Packed FMA, 132 form, memory-only variants: the memory operand is $src2
// (second multiplicand), with $src1 tied to $dst.  `m` takes a full-vector
// load, `mb` a broadcast load (EVEX_B).
3071 let Constraints = "$src1 = $dst" in {
3072 multiclass avx512_fma3p_m132<bits<8> opc, string OpcodeStr,
3073 RegisterClass RC, X86MemOperand x86memop,
3074 PatFrag mem_frag, X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
3075 string BrdcstStr, SDNode OpNode, ValueType OpVT> {
3077 def m: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
3078 (ins RC:$src1, RC:$src3, x86memop:$src2),
3079 !strconcat(OpcodeStr, " \t{$src2, $src3, $dst|$dst, $src3, $src2}"),
3080 [(set RC:$dst, (OpVT (OpNode RC:$src1, (mem_frag addr:$src2), RC:$src3)))]>;
3081 def mb: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
3082 (ins RC:$src1, RC:$src3, x86scalar_mop:$src2),
3083 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
3084 ", $src3, $dst|$dst, $src3, ${src2}", BrdcstStr, "}"),
3085 [(set RC:$dst, (OpNode RC:$src1,
3086 (OpVT (X86VBroadcast (scalar_mfrag addr:$src2))), RC:$src3))]>, EVEX_B;
3088 } // Constraints = "$src1 = $dst"
// 512-bit packed-single 132-form FMA family (memory/broadcast forms only,
// via avx512_fma3p_m132).
3091 let ExeDomain = SSEPackedSingle in {
3092 defm VFMADD132PSZ : avx512_fma3p_m132<0x98, "vfmadd132ps", VR512, f512mem,
3093 memopv16f32, f32mem, loadf32, "{1to16}",
3094 X86Fmadd, v16f32>, EVEX_V512,
3095 EVEX_CD8<32, CD8VF>;
3096 defm VFMSUB132PSZ : avx512_fma3p_m132<0x9A, "vfmsub132ps", VR512, f512mem,
3097 memopv16f32, f32mem, loadf32, "{1to16}",
3098 X86Fmsub, v16f32>, EVEX_V512,
3099 EVEX_CD8<32, CD8VF>;
3100 defm VFMADDSUB132PSZ : avx512_fma3p_m132<0x96, "vfmaddsub132ps", VR512, f512mem,
3101 memopv16f32, f32mem, loadf32, "{1to16}",
3102 X86Fmaddsub, v16f32>,
3103 EVEX_V512, EVEX_CD8<32, CD8VF>;
3104 defm VFMSUBADD132PSZ : avx512_fma3p_m132<0x97, "vfmsubadd132ps", VR512, f512mem,
3105 memopv16f32, f32mem, loadf32, "{1to16}",
3106 X86Fmsubadd, v16f32>,
3107 EVEX_V512, EVEX_CD8<32, CD8VF>;
3108 defm VFNMADD132PSZ : avx512_fma3p_m132<0x9C, "vfnmadd132ps", VR512, f512mem,
3109 memopv16f32, f32mem, loadf32, "{1to16}",
3110 X86Fnmadd, v16f32>, EVEX_V512,
3111 EVEX_CD8<32, CD8VF>;
3112 defm VFNMSUB132PSZ : avx512_fma3p_m132<0x9E, "vfnmsub132ps", VR512, f512mem,
3113 memopv16f32, f32mem, loadf32, "{1to16}",
3114 X86Fnmsub, v16f32>, EVEX_V512,
3115 EVEX_CD8<32, CD8VF>;
// 512-bit packed-double 132-form FMA family; same opcodes as PS with VEX_W.
3117 let ExeDomain = SSEPackedDouble in {
3118 defm VFMADD132PDZ : avx512_fma3p_m132<0x98, "vfmadd132pd", VR512, f512mem,
3119 memopv8f64, f64mem, loadf64, "{1to8}",
3120 X86Fmadd, v8f64>, EVEX_V512,
3121 VEX_W, EVEX_CD8<64, CD8VF>;
3122 defm VFMSUB132PDZ : avx512_fma3p_m132<0x9A, "vfmsub132pd", VR512, f512mem,
3123 memopv8f64, f64mem, loadf64, "{1to8}",
3124 X86Fmsub, v8f64>, EVEX_V512, VEX_W,
3125 EVEX_CD8<64, CD8VF>;
3126 defm VFMADDSUB132PDZ : avx512_fma3p_m132<0x96, "vfmaddsub132pd", VR512, f512mem,
3127 memopv8f64, f64mem, loadf64, "{1to8}",
3128 X86Fmaddsub, v8f64>, EVEX_V512, VEX_W,
3129 EVEX_CD8<64, CD8VF>;
3130 defm VFMSUBADD132PDZ : avx512_fma3p_m132<0x97, "vfmsubadd132pd", VR512, f512mem,
3131 memopv8f64, f64mem, loadf64, "{1to8}",
3132 X86Fmsubadd, v8f64>, EVEX_V512, VEX_W,
3133 EVEX_CD8<64, CD8VF>;
3134 defm VFNMADD132PDZ : avx512_fma3p_m132<0x9C, "vfnmadd132pd", VR512, f512mem,
3135 memopv8f64, f64mem, loadf64, "{1to8}",
3136 X86Fnmadd, v8f64>, EVEX_V512, VEX_W,
3137 EVEX_CD8<64, CD8VF>;
3138 defm VFNMSUB132PDZ : avx512_fma3p_m132<0x9E, "vfnmsub132pd", VR512, f512mem,
3139 memopv8f64, f64mem, loadf64, "{1to8}",
3140 X86Fnmsub, v8f64>, EVEX_V512, VEX_W,
3141 EVEX_CD8<64, CD8VF>;
// Scalar FMA, 213 form.  $src1 is tied to $dst; the register form is marked
// commutable.  Note the pattern swaps operand order: (OpNode $src2, $src1,
// $src3) relative to the instruction's operand list.
3145 let Constraints = "$src1 = $dst" in {
3146 multiclass avx512_fma3s_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
3147 RegisterClass RC, ValueType OpVT,
3148 X86MemOperand x86memop, Operand memop,
3150 let isCommutable = 1 in
3151 def r : AVX512FMA3<opc, MRMSrcReg, (outs RC:$dst),
3152 (ins RC:$src1, RC:$src2, RC:$src3),
3153 !strconcat(OpcodeStr,
3154 " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3156 (OpVT (OpNode RC:$src2, RC:$src1, RC:$src3)))]>;
// Memory form: third operand loaded through mem_frag.
3158 def m : AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
3159 (ins RC:$src1, RC:$src2, f128mem:$src3),
3160 !strconcat(OpcodeStr,
3161 " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3163 (OpVT (OpNode RC:$src2, RC:$src1,
3164 (mem_frag addr:$src3))))]>;
3167 } // Constraints = "$src1 = $dst"
// Scalar 213-form FMA instantiations for f32 (ss) and f64 (sd, VEX_W),
// tuple type CD8VT1 for the scalar memory operand.
3169 defm VFMADDSSZ : avx512_fma3s_rm<0xA9, "vfmadd213ss", X86Fmadd, FR32X,
3170 f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
3171 defm VFMADDSDZ : avx512_fma3s_rm<0xA9, "vfmadd213sd", X86Fmadd, FR64X,
3172 f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
3173 defm VFMSUBSSZ : avx512_fma3s_rm<0xAB, "vfmsub213ss", X86Fmsub, FR32X,
3174 f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
3175 defm VFMSUBSDZ : avx512_fma3s_rm<0xAB, "vfmsub213sd", X86Fmsub, FR64X,
3176 f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
3177 defm VFNMADDSSZ : avx512_fma3s_rm<0xAD, "vfnmadd213ss", X86Fnmadd, FR32X,
3178 f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
3179 defm VFNMADDSDZ : avx512_fma3s_rm<0xAD, "vfnmadd213sd", X86Fnmadd, FR64X,
3180 f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
3181 defm VFNMSUBSSZ : avx512_fma3s_rm<0xAF, "vfnmsub213ss", X86Fnmsub, FR32X,
3182 f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
3183 defm VFNMSUBSDZ : avx512_fma3s_rm<0xAF, "vfnmsub213sd", X86Fnmsub, FR64X,
3184 f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
3186 //===----------------------------------------------------------------------===//
3187 // AVX-512 Scalar convert from sign integer to float/double
3188 //===----------------------------------------------------------------------===//
// Pattern-less (hasSideEffects = 0) rr/rm skeleton for scalar int -> FP
// conversions; selection is done through the explicit Pat<> records below.
3190 multiclass avx512_vcvtsi<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
3191 X86MemOperand x86memop, string asm> {
3192 let hasSideEffects = 0 in {
3193 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
3194 !strconcat(asm," \t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
3197 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
3198 (ins DstRC:$src1, x86memop:$src),
3199 !strconcat(asm," \t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
3201 } // hasSideEffects = 0
// Signed and unsigned GPR -> scalar-FP conversions.  The instructions are
// defined pattern-less above; these Pat<> records select them for
// sint_to_fp/uint_to_fp, passing IMPLICIT_DEF for the tied $src1 operand.
3203 let Predicates = [HasAVX512] in {
3204 defm VCVTSI2SSZ : avx512_vcvtsi<0x2A, GR32, FR32X, i32mem, "cvtsi2ss{l}">,
3205 XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
3206 defm VCVTSI642SSZ : avx512_vcvtsi<0x2A, GR64, FR32X, i64mem, "cvtsi2ss{q}">,
3207 XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
3208 defm VCVTSI2SDZ : avx512_vcvtsi<0x2A, GR32, FR64X, i32mem, "cvtsi2sd{l}">,
3209 XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
3210 defm VCVTSI642SDZ : avx512_vcvtsi<0x2A, GR64, FR64X, i64mem, "cvtsi2sd{q}">,
3211 XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
// sint_to_fp from memory.
3213 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
3214 (VCVTSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
3215 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
3216 (VCVTSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
3217 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
3218 (VCVTSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
3219 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
3220 (VCVTSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
// sint_to_fp from a GPR.
3222 def : Pat<(f32 (sint_to_fp GR32:$src)),
3223 (VCVTSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
3224 def : Pat<(f32 (sint_to_fp GR64:$src)),
3225 (VCVTSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
3226 def : Pat<(f64 (sint_to_fp GR32:$src)),
3227 (VCVTSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
3228 def : Pat<(f64 (sint_to_fp GR64:$src)),
3229 (VCVTSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
// Unsigned variants (opcode 0x7B, AVX-512 only).
3231 defm VCVTUSI2SSZ : avx512_vcvtsi<0x7B, GR32, FR32X, i32mem, "cvtusi2ss{l}">,
3232 XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
3233 defm VCVTUSI642SSZ : avx512_vcvtsi<0x7B, GR64, FR32X, i64mem, "cvtusi2ss{q}">,
3234 XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
3235 defm VCVTUSI2SDZ : avx512_vcvtsi<0x7B, GR32, FR64X, i32mem, "cvtusi2sd{l}">,
3236 XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
3237 defm VCVTUSI642SDZ : avx512_vcvtsi<0x7B, GR64, FR64X, i64mem, "cvtusi2sd{q}">,
3238 XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
// uint_to_fp from memory.
3240 def : Pat<(f32 (uint_to_fp (loadi32 addr:$src))),
3241 (VCVTUSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
3242 def : Pat<(f32 (uint_to_fp (loadi64 addr:$src))),
3243 (VCVTUSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
3244 def : Pat<(f64 (uint_to_fp (loadi32 addr:$src))),
3245 (VCVTUSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
3246 def : Pat<(f64 (uint_to_fp (loadi64 addr:$src))),
3247 (VCVTUSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
// uint_to_fp from a GPR.
3249 def : Pat<(f32 (uint_to_fp GR32:$src)),
3250 (VCVTUSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
3251 def : Pat<(f32 (uint_to_fp GR64:$src)),
3252 (VCVTUSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
3253 def : Pat<(f64 (uint_to_fp GR32:$src)),
3254 (VCVTUSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
3255 def : Pat<(f64 (uint_to_fp GR64:$src)),
3256 (VCVTUSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
3259 //===----------------------------------------------------------------------===//
3260 // AVX-512 Scalar convert from float/double to integer
3261 //===----------------------------------------------------------------------===//
// Intrinsic-based scalar FP -> int conversion skeleton: rr selects via the
// intrinsic, rm is left pattern-less (both hasSideEffects = 0).
3262 multiclass avx512_cvt_s_int<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
3263 Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
3265 let hasSideEffects = 0 in {
3266 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
3267 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3268 [(set DstRC:$dst, (Int SrcRC:$src))]>, EVEX, VEX_LIG,
3269 Requires<[HasAVX512]>;
3271 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
3272 !strconcat(asm," \t{$src, $dst|$dst, $src}"), []>, EVEX, VEX_LIG,
3273 Requires<[HasAVX512]>;
3274 } // hasSideEffects = 0
// Rounding (non-truncating) scalar FP -> signed/unsigned int conversions,
// built on the SSE/AVX-512 cvt intrinsics.
3276 let Predicates = [HasAVX512] in {
3277 // Convert float/double to signed/unsigned int 32/64
3278 defm VCVTSS2SIZ: avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse_cvtss2si,
3279 ssmem, sse_load_f32, "cvtss2si">,
3280 XS, EVEX_CD8<32, CD8VT1>;
3281 defm VCVTSS2SI64Z: avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse_cvtss2si64,
3282 ssmem, sse_load_f32, "cvtss2si">,
3283 XS, VEX_W, EVEX_CD8<32, CD8VT1>;
3284 defm VCVTSS2USIZ: avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtss2usi,
3285 ssmem, sse_load_f32, "cvtss2usi">,
3286 XS, EVEX_CD8<32, CD8VT1>;
3287 defm VCVTSS2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
3288 int_x86_avx512_cvtss2usi64, ssmem,
3289 sse_load_f32, "cvtss2usi">, XS, VEX_W,
3290 EVEX_CD8<32, CD8VT1>;
3291 defm VCVTSD2SIZ: avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse2_cvtsd2si,
3292 sdmem, sse_load_f64, "cvtsd2si">,
3293 XD, EVEX_CD8<64, CD8VT1>;
3294 defm VCVTSD2SI64Z: avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse2_cvtsd2si64,
3295 sdmem, sse_load_f64, "cvtsd2si">,
3296 XD, VEX_W, EVEX_CD8<64, CD8VT1>;
3297 defm VCVTSD2USIZ: avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtsd2usi,
3298 sdmem, sse_load_f64, "cvtsd2usi">,
3299 XD, EVEX_CD8<64, CD8VT1>;
3300 defm VCVTSD2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
3301 int_x86_avx512_cvtsd2usi64, sdmem,
3302 sse_load_f64, "cvtsd2usi">, XD, VEX_W,
3303 EVEX_CD8<64, CD8VT1>;
// Intrinsic-only (isCodeGenOnly) three-address int -> scalar-FP forms,
// reusing the SSE sse12_cvt_sint_3addr multiclass with EVEX_4V encoding.
3305 let isCodeGenOnly = 1 in {
3306 defm Int_VCVTSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
3307 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
3308 SSE_CVT_Scalar, 0>, XS, EVEX_4V;
3309 defm Int_VCVTSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
3310 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
3311 SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
3312 defm Int_VCVTSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
3313 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
3314 SSE_CVT_Scalar, 0>, XD, EVEX_4V;
3315 defm Int_VCVTSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
3316 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
3317 SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
// Unsigned counterparts use AVX-512-specific intrinsics.
3319 defm Int_VCVTUSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
3320 int_x86_avx512_cvtusi2ss, i32mem, loadi32, "cvtusi2ss{l}",
3321 SSE_CVT_Scalar, 0>, XS, EVEX_4V;
3322 defm Int_VCVTUSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
3323 int_x86_avx512_cvtusi642ss, i64mem, loadi64, "cvtusi2ss{q}",
3324 SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
3325 defm Int_VCVTUSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
3326 int_x86_avx512_cvtusi2sd, i32mem, loadi32, "cvtusi2sd{l}",
3327 SSE_CVT_Scalar, 0>, XD, EVEX_4V;
3328 defm Int_VCVTUSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
3329 int_x86_avx512_cvtusi642sd, i64mem, loadi64, "cvtusi2sd{q}",
3330 SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
3331 } // isCodeGenOnly = 1
3333 // Convert float/double to signed/unsigned int 32/64 with truncation
// Intrinsic-based truncating conversions (isCodeGenOnly): opcode 0x2C for
// signed, 0x78 for the AVX-512 unsigned variants.
3334 let isCodeGenOnly = 1 in {
3335 defm Int_VCVTTSS2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse_cvttss2si,
3336 ssmem, sse_load_f32, "cvttss2si">,
3337 XS, EVEX_CD8<32, CD8VT1>;
3338 defm Int_VCVTTSS2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
3339 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
3340 "cvttss2si">, XS, VEX_W,
3341 EVEX_CD8<32, CD8VT1>;
3342 defm Int_VCVTTSD2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse2_cvttsd2si,
3343 sdmem, sse_load_f64, "cvttsd2si">, XD,
3344 EVEX_CD8<64, CD8VT1>;
3345 defm Int_VCVTTSD2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
3346 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
3347 "cvttsd2si">, XD, VEX_W,
3348 EVEX_CD8<64, CD8VT1>;
3349 defm Int_VCVTTSS2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
3350 int_x86_avx512_cvttss2usi, ssmem, sse_load_f32,
3351 "cvttss2usi">, XS, EVEX_CD8<32, CD8VT1>;
3352 defm Int_VCVTTSS2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
3353 int_x86_avx512_cvttss2usi64, ssmem,
3354 sse_load_f32, "cvttss2usi">, XS, VEX_W,
3355 EVEX_CD8<32, CD8VT1>;
3356 defm Int_VCVTTSD2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
3357 int_x86_avx512_cvttsd2usi,
3358 sdmem, sse_load_f64, "cvttsd2usi">, XD,
3359 EVEX_CD8<64, CD8VT1>;
3360 defm Int_VCVTTSD2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
3361 int_x86_avx512_cvttsd2usi64, sdmem,
3362 sse_load_f64, "cvttsd2usi">, XD, VEX_W,
3363 EVEX_CD8<64, CD8VT1>;
3364 } // isCodeGenOnly = 1
// Generic-node (fp_to_sint / fp_to_uint) scalar conversion skeleton with
// register and memory forms; used by the non-intrinsic VCVTT* defms below.
3366 multiclass avx512_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
3367 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
3369 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
3370 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3371 [(set DstRC:$dst, (OpNode SrcRC:$src))]>, EVEX;
3372 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
3373 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3374 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>, EVEX;
// Truncating scalar FP -> int conversions selected from the generic
// fp_to_sint/fp_to_uint nodes (FR32X/FR64X sources).
3377 defm VCVTTSS2SIZ : avx512_cvt_s<0x2C, FR32X, GR32, fp_to_sint, f32mem,
3378 loadf32, "cvttss2si">, XS,
3379 EVEX_CD8<32, CD8VT1>;
3380 defm VCVTTSS2USIZ : avx512_cvt_s<0x78, FR32X, GR32, fp_to_uint, f32mem,
3381 loadf32, "cvttss2usi">, XS,
3382 EVEX_CD8<32, CD8VT1>;
3383 defm VCVTTSS2SI64Z : avx512_cvt_s<0x2C, FR32X, GR64, fp_to_sint, f32mem,
3384 loadf32, "cvttss2si">, XS, VEX_W,
3385 EVEX_CD8<32, CD8VT1>;
3386 defm VCVTTSS2USI64Z : avx512_cvt_s<0x78, FR32X, GR64, fp_to_uint, f32mem,
3387 loadf32, "cvttss2usi">, XS, VEX_W,
3388 EVEX_CD8<32, CD8VT1>;
3389 defm VCVTTSD2SIZ : avx512_cvt_s<0x2C, FR64X, GR32, fp_to_sint, f64mem,
3390 loadf64, "cvttsd2si">, XD,
3391 EVEX_CD8<64, CD8VT1>;
3392 defm VCVTTSD2USIZ : avx512_cvt_s<0x78, FR64X, GR32, fp_to_uint, f64mem,
3393 loadf64, "cvttsd2usi">, XD,
3394 EVEX_CD8<64, CD8VT1>;
3395 defm VCVTTSD2SI64Z : avx512_cvt_s<0x2C, FR64X, GR64, fp_to_sint, f64mem,
3396 loadf64, "cvttsd2si">, XD, VEX_W,
3397 EVEX_CD8<64, CD8VT1>;
3398 defm VCVTTSD2USI64Z : avx512_cvt_s<0x78, FR64X, GR64, fp_to_uint, f64mem,
3399 loadf64, "cvttsd2usi">, XD, VEX_W,
3400 EVEX_CD8<64, CD8VT1>;
3402 //===----------------------------------------------------------------------===//
3403 // AVX-512 Convert form float to double and back
3404 //===----------------------------------------------------------------------===//
// Pattern-less scalar ss<->sd conversion instructions; selection happens via
// the Pat<> records below (fextend / fround / extloadf32).
3405 let hasSideEffects = 0 in {
3406 def VCVTSS2SDZrr : AVX512XSI<0x5A, MRMSrcReg, (outs FR64X:$dst),
3407 (ins FR32X:$src1, FR32X:$src2),
3408 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3409 []>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2F]>;
3411 def VCVTSS2SDZrm : AVX512XSI<0x5A, MRMSrcMem, (outs FR64X:$dst),
3412 (ins FR32X:$src1, f32mem:$src2),
3413 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3414 []>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>,
3415 EVEX_CD8<32, CD8VT1>;
3417 // Convert scalar double to scalar single
3418 def VCVTSD2SSZrr : AVX512XDI<0x5A, MRMSrcReg, (outs FR32X:$dst),
3419 (ins FR64X:$src1, FR64X:$src2),
3420 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3421 []>, EVEX_4V, VEX_LIG, VEX_W, Sched<[WriteCvtF2F]>;
3423 def VCVTSD2SSZrm : AVX512XDI<0x5A, MRMSrcMem, (outs FR32X:$dst),
3424 (ins FR64X:$src1, f64mem:$src2),
3425 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3426 []>, EVEX_4V, VEX_LIG, VEX_W,
3427 Sched<[WriteCvtF2FLd, ReadAfterLd]>, EVEX_CD8<64, CD8VT1>;
// Register fextend: feed the source as both operands of the 2-src form.
3430 def : Pat<(f64 (fextend FR32X:$src)), (VCVTSS2SDZrr FR32X:$src, FR32X:$src)>,
3431 Requires<[HasAVX512]>;
3432 def : Pat<(fextend (loadf32 addr:$src)),
3433 (VCVTSS2SDZrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX512]>;
// extloadf32: memory form when optimizing for size, load + register
// convert when optimizing for speed.
3435 def : Pat<(extloadf32 addr:$src),
3436 (VCVTSS2SDZrm (f32 (IMPLICIT_DEF)), addr:$src)>,
3437 Requires<[HasAVX512, OptForSize]>;
3439 def : Pat<(extloadf32 addr:$src),
3440 (VCVTSS2SDZrr (f32 (IMPLICIT_DEF)), (VMOVSSZrm addr:$src))>,
3441 Requires<[HasAVX512, OptForSpeed]>;
3443 def : Pat<(f32 (fround FR64X:$src)), (VCVTSD2SSZrr FR64X:$src, FR64X:$src)>,
3444 Requires<[HasAVX512]>;
// Packed FP conversion skeleton with an extra static-rounding form (rrb,
// EVEX_B + EVEX_RC with an AVX512RC rounding-mode operand).
3446 multiclass avx512_vcvt_fp_with_rc<bits<8> opc, string asm, RegisterClass SrcRC,
3447 RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
3448 X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
3450 let hasSideEffects = 0 in {
3451 def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
3452 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3454 (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
// rrb has no selection pattern; it is reached through explicit Pat<>s on
// the rounding-mode intrinsics elsewhere in this file.
3455 def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
3456 !strconcat(asm," \t{$rc, $src, $dst|$dst, $src, $rc}"),
3457 [], d>, EVEX, EVEX_B, EVEX_RC;
3459 def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
3460 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3462 (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
3463 } // hasSideEffects = 0
// Same skeleton without the static-rounding (rrb) form.
3466 multiclass avx512_vcvt_fp<bits<8> opc, string asm, RegisterClass SrcRC,
3467 RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
3468 X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
3470 let hasSideEffects = 0 in {
3471 def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
3472 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3474 (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
3476 def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
3477 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3479 (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
3480 } // hasSideEffects = 0
// Packed double <-> single conversions: vcvtpd2ps narrows (v8f64 -> v8f32,
// has a rounding-control form), vcvtps2pd widens (v8f32 -> v8f64).
3483 defm VCVTPD2PSZ : avx512_vcvt_fp_with_rc<0x5A, "vcvtpd2ps", VR512, VR256X, fround,
3484 memopv8f64, f512mem, v8f32, v8f64,
3485 SSEPackedSingle>, EVEX_V512, VEX_W, PD,
3486 EVEX_CD8<64, CD8VF>;
3488 defm VCVTPS2PDZ : avx512_vcvt_fp<0x5A, "vcvtps2pd", VR256X, VR512, fextend,
3489 memopv4f64, f256mem, v8f64, v8f32,
3490 SSEPackedDouble>, EVEX_V512, PS,
3491 EVEX_CD8<32, CD8VH>;
3492 def : Pat<(v8f64 (extloadv8f32 addr:$src)),
3493 (VCVTPS2PDZrm addr:$src)>;
// Map the unmasked (all-ones mask) cvtpd2ps intrinsic onto the plain and
// rounding-control register forms.
3495 def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
3496 (bc_v8f32(v8i32 immAllZerosV)), (i8 -1), (i32 FROUND_CURRENT))),
3497 (VCVTPD2PSZrr VR512:$src)>;
3499 def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
3500 (bc_v8f32(v8i32 immAllZerosV)), (i8 -1), imm:$rc)),
3501 (VCVTPD2PSZrrb VR512:$src, imm:$rc)>;
3503 //===----------------------------------------------------------------------===//
3504 // AVX-512 Vector convert from sign integer to float/double
3505 //===----------------------------------------------------------------------===//
// Packed int <-> FP conversions, signed and unsigned, instantiated from the
// avx512_vcvt_fp(/with_rc) skeletons above.
3507 defm VCVTDQ2PSZ : avx512_vcvt_fp_with_rc<0x5B, "vcvtdq2ps", VR512, VR512, sint_to_fp,
3508 memopv8i64, i512mem, v16f32, v16i32,
3509 SSEPackedSingle>, EVEX_V512, PS,
3510 EVEX_CD8<32, CD8VF>;
3512 defm VCVTDQ2PDZ : avx512_vcvt_fp<0xE6, "vcvtdq2pd", VR256X, VR512, sint_to_fp,
3513 memopv4i64, i256mem, v8f64, v8i32,
3514 SSEPackedDouble>, EVEX_V512, XS,
3515 EVEX_CD8<32, CD8VH>;
3517 defm VCVTTPS2DQZ : avx512_vcvt_fp<0x5B, "vcvttps2dq", VR512, VR512, fp_to_sint,
3518 memopv16f32, f512mem, v16i32, v16f32,
3519 SSEPackedSingle>, EVEX_V512, XS,
3520 EVEX_CD8<32, CD8VF>;
3522 defm VCVTTPD2DQZ : avx512_vcvt_fp<0xE6, "vcvttpd2dq", VR512, VR256X, fp_to_sint,
3523 memopv8f64, f512mem, v8i32, v8f64,
3524 SSEPackedDouble>, EVEX_V512, PD, VEX_W,
3525 EVEX_CD8<64, CD8VF>;
3527 defm VCVTTPS2UDQZ : avx512_vcvt_fp<0x78, "vcvttps2udq", VR512, VR512, fp_to_uint,
3528 memopv16f32, f512mem, v16i32, v16f32,
3529 SSEPackedSingle>, EVEX_V512, PS,
3530 EVEX_CD8<32, CD8VF>;
3532 // cvttps2udq (src, 0, mask-all-ones, sae-current)
3533 def : Pat<(v16i32 (int_x86_avx512_mask_cvttps2udq_512 (v16f32 VR512:$src),
3534 (v16i32 immAllZerosV), (i16 -1), FROUND_CURRENT)),
3535 (VCVTTPS2UDQZrr VR512:$src)>;
3537 defm VCVTTPD2UDQZ : avx512_vcvt_fp<0x78, "vcvttpd2udq", VR512, VR256X, fp_to_uint,
3538 memopv8f64, f512mem, v8i32, v8f64,
3539 SSEPackedDouble>, EVEX_V512, PS, VEX_W,
3540 EVEX_CD8<64, CD8VF>;
3542 // cvttpd2udq (src, 0, mask-all-ones, sae-current)
3543 def : Pat<(v8i32 (int_x86_avx512_mask_cvttpd2udq_512 (v8f64 VR512:$src),
3544 (v8i32 immAllZerosV), (i8 -1), FROUND_CURRENT)),
3545 (VCVTTPD2UDQZrr VR512:$src)>;
3547 defm VCVTUDQ2PDZ : avx512_vcvt_fp<0x7A, "vcvtudq2pd", VR256X, VR512, uint_to_fp,
3548 memopv4i64, f256mem, v8f64, v8i32,
3549 SSEPackedDouble>, EVEX_V512, XS,
3550 EVEX_CD8<32, CD8VH>;
3552 defm VCVTUDQ2PSZ : avx512_vcvt_fp_with_rc<0x7A, "vcvtudq2ps", VR512, VR512, uint_to_fp,
3553 memopv16i32, f512mem, v16f32, v16i32,
3554 SSEPackedSingle>, EVEX_V512, XD,
3555 EVEX_CD8<32, CD8VF>;
// Narrow (128/256-bit) unsigned conversions have no dedicated instruction:
// widen the operand into a 512-bit register with SUBREG_TO_REG, run the
// 512-bit conversion, then extract the low subregister.
3557 def : Pat<(v8i32 (fp_to_uint (v8f32 VR256X:$src1))),
3558 (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
3559 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
3561 def : Pat<(v4i32 (fp_to_uint (v4f32 VR128X:$src1))),
3562 (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
3563 (v16f32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;
3565 def : Pat<(v8f32 (uint_to_fp (v8i32 VR256X:$src1))),
3566 (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
3567 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
3569 def : Pat<(v4f32 (uint_to_fp (v4i32 VR128X:$src1))),
3570 (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
3571 (v16i32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;
3573 def : Pat<(v4f64 (uint_to_fp (v4i32 VR128X:$src1))),
3574 (EXTRACT_SUBREG (v8f64 (VCVTUDQ2PDZrr
3575 (v8i32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_ymm)>;
// Unmasked (all-ones mask) conversion intrinsics, with and without an
// explicit rounding mode, mapped to the plain/rrb register forms.
3577 def : Pat<(v16f32 (int_x86_avx512_mask_cvtdq2ps_512 (v16i32 VR512:$src),
3578 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), imm:$rc)),
3579 (VCVTDQ2PSZrrb VR512:$src, imm:$rc)>;
3580 def : Pat<(v8f64 (int_x86_avx512_mask_cvtdq2pd_512 (v8i32 VR256X:$src),
3581 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
3582 (VCVTDQ2PDZrr VR256X:$src)>;
3583 def : Pat<(v16f32 (int_x86_avx512_mask_cvtudq2ps_512 (v16i32 VR512:$src),
3584 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), imm:$rc)),
3585 (VCVTUDQ2PSZrrb VR512:$src, imm:$rc)>;
3586 def : Pat<(v8f64 (int_x86_avx512_mask_cvtudq2pd_512 (v8i32 VR256X:$src),
3587 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
3588 (VCVTUDQ2PDZrr VR256X:$src)>;
// Packed FP -> int skeleton with a static-rounding (rrb) form; all variants
// are pattern-less here and selected via intrinsic Pat<> records below.
3590 multiclass avx512_vcvt_fp2int<bits<8> opc, string asm, RegisterClass SrcRC,
3591 RegisterClass DstRC, PatFrag mem_frag,
3592 X86MemOperand x86memop, Domain d> {
3593 let hasSideEffects = 0 in {
3594 def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
3595 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3597 def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
3598 !strconcat(asm," \t{$rc, $src, $dst|$dst, $src, $rc}"),
3599 [], d>, EVEX, EVEX_B, EVEX_RC;
3601 def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
3602 !strconcat(asm," \t{$src, $dst|$dst, $src}"),
3604 } // hasSideEffects = 0
// Rounding (non-truncating) packed FP -> signed/unsigned int conversions;
// the unmasked intrinsic forms with a rounding mode select the rrb variants.
3607 defm VCVTPS2DQZ : avx512_vcvt_fp2int<0x5B, "vcvtps2dq", VR512, VR512,
3608 memopv16f32, f512mem, SSEPackedSingle>, PD,
3609 EVEX_V512, EVEX_CD8<32, CD8VF>;
3610 defm VCVTPD2DQZ : avx512_vcvt_fp2int<0xE6, "vcvtpd2dq", VR512, VR256X,
3611 memopv8f64, f512mem, SSEPackedDouble>, XD, VEX_W,
3612 EVEX_V512, EVEX_CD8<64, CD8VF>;
3614 def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2dq_512 (v16f32 VR512:$src),
3615 (v16i32 immAllZerosV), (i16 -1), imm:$rc)),
3616 (VCVTPS2DQZrrb VR512:$src, imm:$rc)>;
3618 def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2dq_512 (v8f64 VR512:$src),
3619 (v8i32 immAllZerosV), (i8 -1), imm:$rc)),
3620 (VCVTPD2DQZrrb VR512:$src, imm:$rc)>;
3622 defm VCVTPS2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtps2udq", VR512, VR512,
3623 memopv16f32, f512mem, SSEPackedSingle>,
3624 PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
3625 defm VCVTPD2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtpd2udq", VR512, VR256X,
3626 memopv8f64, f512mem, SSEPackedDouble>, VEX_W,
3627 PS, EVEX_V512, EVEX_CD8<64, CD8VF>;
3629 def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2udq_512 (v16f32 VR512:$src),
3630 (v16i32 immAllZerosV), (i16 -1), imm:$rc)),
3631 (VCVTPS2UDQZrrb VR512:$src, imm:$rc)>;
3633 def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2udq_512 (v8f64 VR512:$src),
3634 (v8i32 immAllZerosV), (i8 -1), imm:$rc)),
3635 (VCVTPD2UDQZrrb VR512:$src, imm:$rc)>;
// Fold loads into the packed double<->single conversions.
// NOTE(review): the closing brace of this `let` block is outside the
// visible excerpt.
3637 let Predicates = [HasAVX512] in {
3638 def : Pat<(v8f32 (fround (loadv8f64 addr:$src))),
3639 (VCVTPD2PSZrm addr:$src)>;
3640 def : Pat<(v8f64 (extloadv8f32 addr:$src)),
3641 (VCVTPS2PDZrm addr:$src)>;
3644 //===----------------------------------------------------------------------===//
3645 // Half precision conversion instructions
3646 //===----------------------------------------------------------------------===//
// vcvtph2ps: half -> single.  rr form's pattern list is on a line not
// visible in this excerpt; rm is pattern-less (mayLoad, no side effects).
3647 multiclass avx512_cvtph2ps<RegisterClass destRC, RegisterClass srcRC,
3648 X86MemOperand x86memop> {
3649 def rr : AVX5128I<0x13, MRMSrcReg, (outs destRC:$dst), (ins srcRC:$src),
3650 "vcvtph2ps\t{$src, $dst|$dst, $src}",
3652 let hasSideEffects = 0, mayLoad = 1 in
3653 def rm : AVX5128I<0x13, MRMSrcMem, (outs destRC:$dst), (ins x86memop:$src),
3654 "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, EVEX;
// vcvtps2ph: single -> half, with an immediate rounding-control operand;
// mr form stores straight to memory (mayStore, pattern-less).
3657 multiclass avx512_cvtps2ph<RegisterClass destRC, RegisterClass srcRC,
3658 X86MemOperand x86memop> {
3659 def rr : AVX512AIi8<0x1D, MRMDestReg, (outs destRC:$dst),
3660 (ins srcRC:$src1, i32i8imm:$src2),
3661 "vcvtps2ph \t{$src2, $src1, $dst|$dst, $src1, $src2}",
3663 let hasSideEffects = 0, mayStore = 1 in
3664 def mr : AVX512AIi8<0x1D, MRMDestMem, (outs),
3665 (ins x86memop:$dst, srcRC:$src1, i32i8imm:$src2),
3666 "vcvtps2ph \t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, EVEX;
// 512-bit instantiations (16 x f32 <-> 16 x f16 in a 256-bit operand).
3669 defm VCVTPH2PSZ : avx512_cvtph2ps<VR512, VR256X, f256mem>, EVEX_V512,
3670 EVEX_CD8<32, CD8VH>;
3671 defm VCVTPS2PHZ : avx512_cvtps2ph<VR256X, VR512, f256mem>, EVEX_V512,
3672 EVEX_CD8<32, CD8VH>;
// Unmasked (all-ones mask) intrinsic forms.
3674 def : Pat<(v16i16 (int_x86_avx512_mask_vcvtps2ph_512 (v16f32 VR512:$src),
3675 imm:$rc, (bc_v16i16(v8i32 immAllZerosV)), (i16 -1))),
3676 (VCVTPS2PHZrr VR512:$src, imm:$rc)>;
3678 def : Pat<(v16f32 (int_x86_avx512_mask_vcvtph2ps_512 (v16i16 VR256X:$src),
3679 (bc_v16f32(v16i32 immAllZerosV)), (i16 -1), (i32 FROUND_CURRENT))),
3680 (VCVTPH2PSZrr VR256X:$src)>;
// EVEX-encoded scalar ordered/unordered compares. All of these set EFLAGS
// (Defs = [EFLAGS]). vucomiss/vucomisd get ISel patterns via X86cmp; the
// plain vcomiss/vcomisd defs exist for the assembler only (Pattern = []),
// and the Int_* variants are isCodeGenOnly intrinsic forms on VR128X.
// NOTE(review): several closing braces of these nested let-blocks (original
// lines 3696/3704/3711-3712) are missing from this listing.
3682 let Defs = [EFLAGS], Predicates = [HasAVX512] in {
3683 defm VUCOMISSZ : sse12_ord_cmp<0x2E, FR32X, X86cmp, f32, f32mem, loadf32,
3684 "ucomiss">, PS, EVEX, VEX_LIG,
3685 EVEX_CD8<32, CD8VT1>;
3686 defm VUCOMISDZ : sse12_ord_cmp<0x2E, FR64X, X86cmp, f64, f64mem, loadf64,
3687 "ucomisd">, PD, EVEX,
3688 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
3689 let Pattern = []<dag> in {
3690 defm VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, undef, v4f32, f128mem, load,
3691 "comiss">, PS, EVEX, VEX_LIG,
3692 EVEX_CD8<32, CD8VT1>;
3693 defm VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, undef, v2f64, f128mem, load,
3694 "comisd">, PD, EVEX,
3695 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
3697 let isCodeGenOnly = 1 in {
3698 defm Int_VUCOMISSZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v4f32, f128mem,
3699 load, "ucomiss">, PS, EVEX, VEX_LIG,
3700 EVEX_CD8<32, CD8VT1>;
3701 defm Int_VUCOMISDZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v2f64, f128mem,
3702 load, "ucomisd">, PD, EVEX,
3703 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
3705 defm Int_VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v4f32, f128mem,
3706 load, "comiss">, PS, EVEX, VEX_LIG,
3707 EVEX_CD8<32, CD8VT1>;
3708 defm Int_VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v2f64, f128mem,
3709 load, "comisd">, PD, EVEX,
3710 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
3714 /// avx512_fp14_s rcp14ss, rcp14sd, rsqrt14ss, rsqrt14sd
// Scalar 14-bit-precision reciprocal / reciprocal-sqrt approximations.
// No ISel patterns here (hasSideEffects = 0, empty pattern lists); selection
// happens through the explicit Pat<> defs below.
// NOTE(review): the closing braces of this multiclass (original 3727-3729)
// are missing from this listing.
3715 multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
3716 X86MemOperand x86memop> {
3717 let hasSideEffects = 0 in {
3718 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
3719 (ins RC:$src1, RC:$src2),
3720 !strconcat(OpcodeStr,
3721 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
3722 let mayLoad = 1 in {
3723 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
3724 (ins RC:$src1, x86memop:$src2),
3725 !strconcat(OpcodeStr,
3726 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
3731 defm VRCP14SS : avx512_fp14_s<0x4D, "vrcp14ss", FR32X, f32mem>,
3732 EVEX_CD8<32, CD8VT1>;
3733 defm VRCP14SD : avx512_fp14_s<0x4D, "vrcp14sd", FR64X, f64mem>,
3734 VEX_W, EVEX_CD8<64, CD8VT1>;
3735 defm VRSQRT14SS : avx512_fp14_s<0x4F, "vrsqrt14ss", FR32X, f32mem>,
3736 EVEX_CD8<32, CD8VT1>;
3737 defm VRSQRT14SD : avx512_fp14_s<0x4F, "vrsqrt14sd", FR64X, f64mem>,
3738 VEX_W, EVEX_CD8<64, CD8VT1>;
// Map the unmasked scalar intrinsics (zero passthru, all-ones mask) onto the
// FR32X/FR64X register forms, bouncing through COPY_TO_REGCLASS since the
// intrinsics operate on full VR128X values.
3740 def : Pat <(v4f32 (int_x86_avx512_rcp14_ss (v4f32 VR128X:$src1),
3741 (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1))),
3742 (COPY_TO_REGCLASS (VRCP14SSrr (COPY_TO_REGCLASS VR128X:$src1, FR32X),
3743 (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
3745 def : Pat <(v2f64 (int_x86_avx512_rcp14_sd (v2f64 VR128X:$src1),
3746 (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1))),
3747 (COPY_TO_REGCLASS (VRCP14SDrr (COPY_TO_REGCLASS VR128X:$src1, FR64X),
3748 (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
3750 def : Pat <(v4f32 (int_x86_avx512_rsqrt14_ss (v4f32 VR128X:$src1),
3751 (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1))),
3752 (COPY_TO_REGCLASS (VRSQRT14SSrr (COPY_TO_REGCLASS VR128X:$src1, FR32X),
3753 (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
3755 def : Pat <(v2f64 (int_x86_avx512_rsqrt14_sd (v2f64 VR128X:$src1),
3756 (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1))),
3757 (COPY_TO_REGCLASS (VRSQRT14SDrr (COPY_TO_REGCLASS VR128X:$src1, FR64X),
3758 (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
3760 /// avx512_fp14_p rcp14ps, rcp14pd, rsqrt14ps, rsqrt14pd
// Packed 14-bit-precision approximations. Unlike the scalar version these do
// carry ISel patterns via OpNode (X86frsqrt / X86frcp below).
// NOTE(review): trailer lines of this multiclass (original 3768, 3772-3773,
// including its closing '}') are missing from this listing.
3761 multiclass avx512_fp14_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
3762 RegisterClass RC, X86MemOperand x86memop,
3763 PatFrag mem_frag, ValueType OpVt> {
3764 def r : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
3765 !strconcat(OpcodeStr,
3766 " \t{$src, $dst|$dst, $src}"),
3767 [(set RC:$dst, (OpVt (OpNode RC:$src)))]>,
3769 def m : AVX5128I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
3770 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
3771 [(set RC:$dst, (OpVt (OpNode (mem_frag addr:$src))))]>,
3774 defm VRSQRT14PSZ : avx512_fp14_p<0x4E, "vrsqrt14ps", X86frsqrt, VR512, f512mem,
3775 memopv16f32, v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
3776 defm VRSQRT14PDZ : avx512_fp14_p<0x4E, "vrsqrt14pd", X86frsqrt, VR512, f512mem,
3777 memopv8f64, v8f64>, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
3778 defm VRCP14PSZ : avx512_fp14_p<0x4C, "vrcp14ps", X86frcp, VR512, f512mem,
3779 memopv16f32, v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
3780 defm VRCP14PDZ : avx512_fp14_p<0x4C, "vrcp14pd", X86frcp, VR512, f512mem,
3781 memopv8f64, v8f64>, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
// Unmasked packed intrinsic forms -> plain register defs.
3783 def : Pat <(v16f32 (int_x86_avx512_rsqrt14_ps_512 (v16f32 VR512:$src),
3784 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
3785 (VRSQRT14PSZr VR512:$src)>;
3786 def : Pat <(v8f64 (int_x86_avx512_rsqrt14_pd_512 (v8f64 VR512:$src),
3787 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
3788 (VRSQRT14PDZr VR512:$src)>;
3790 def : Pat <(v16f32 (int_x86_avx512_rcp14_ps_512 (v16f32 VR512:$src),
3791 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
3792 (VRCP14PSZr VR512:$src)>;
3793 def : Pat <(v8f64 (int_x86_avx512_rcp14_pd_512 (v8f64 VR512:$src),
3794 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
3795 (VRCP14PDZr VR512:$src)>;
3797 /// avx512_fp28_s rcp28ss, rcp28sd, rsqrt28ss, rsqrt28sd
// Scalar 28-bit-precision approximations (AVX-512 ER extension, hence the
// HasERI predicate). The rrb form is the suppress-all-exceptions ({sae})
// variant, encoded with EVEX_B.
// NOTE(review): the multiclass trailer (original 3815-3817) is missing from
// this listing.
3798 multiclass avx512_fp28_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
3799 X86MemOperand x86memop> {
3800 let hasSideEffects = 0, Predicates = [HasERI] in {
3801 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
3802 (ins RC:$src1, RC:$src2),
3803 !strconcat(OpcodeStr,
3804 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
3805 def rrb : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
3806 (ins RC:$src1, RC:$src2),
3807 !strconcat(OpcodeStr,
3808 " \t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
3809 []>, EVEX_4V, EVEX_B;
3810 let mayLoad = 1 in {
3811 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
3812 (ins RC:$src1, x86memop:$src2),
3813 !strconcat(OpcodeStr,
3814 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
3819 defm VRCP28SS : avx512_fp28_s<0xCB, "vrcp28ss", FR32X, f32mem>,
3820 EVEX_CD8<32, CD8VT1>;
3821 defm VRCP28SD : avx512_fp28_s<0xCB, "vrcp28sd", FR64X, f64mem>,
3822 VEX_W, EVEX_CD8<64, CD8VT1>;
3823 defm VRSQRT28SS : avx512_fp28_s<0xCD, "vrsqrt28ss", FR32X, f32mem>,
3824 EVEX_CD8<32, CD8VT1>;
3825 defm VRSQRT28SD : avx512_fp28_s<0xCD, "vrsqrt28sd", FR64X, f64mem>,
3826 VEX_W, EVEX_CD8<64, CD8VT1>;
// Unmasked scalar intrinsics map onto the {sae} (rrb) forms.
// NOTE(review): each pattern's final source line (original 3830/3836/3842/
// 3848, presumably the FROUND_NO_EXC operand) is missing from this listing.
3828 def : Pat <(v4f32 (int_x86_avx512_rcp28_ss (v4f32 VR128X:$src1),
3829 (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1),
3831 (COPY_TO_REGCLASS (VRCP28SSrrb (COPY_TO_REGCLASS VR128X:$src1, FR32X),
3832 (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
3834 def : Pat <(v2f64 (int_x86_avx512_rcp28_sd (v2f64 VR128X:$src1),
3835 (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1),
3837 (COPY_TO_REGCLASS (VRCP28SDrrb (COPY_TO_REGCLASS VR128X:$src1, FR64X),
3838 (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
3840 def : Pat <(v4f32 (int_x86_avx512_rsqrt28_ss (v4f32 VR128X:$src1),
3841 (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1),
3843 (COPY_TO_REGCLASS (VRSQRT28SSrrb (COPY_TO_REGCLASS VR128X:$src1, FR32X),
3844 (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
3846 def : Pat <(v2f64 (int_x86_avx512_rsqrt28_sd (v2f64 VR128X:$src1),
3847 (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1),
3849 (COPY_TO_REGCLASS (VRSQRT28SDrrb (COPY_TO_REGCLASS VR128X:$src1, FR64X),
3850 (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
3852 /// avx512_fp28_p rcp28ps, rcp28pd, rsqrt28ps, rsqrt28pd
// Packed 28-bit-precision approximations (ER extension). The rb form is the
// embedded {sae} variant targeted by the intrinsic patterns below.
// NOTE(review): per the embedded numbering, the pattern lines and trailer of
// this multiclass (original 3859, 3863, 3866-3868) are missing here.
3853 multiclass avx512_fp28_p<bits<8> opc, string OpcodeStr,
3854 RegisterClass RC, X86MemOperand x86memop> {
3855 let hasSideEffects = 0, Predicates = [HasERI] in {
3856 def r : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
3857 !strconcat(OpcodeStr,
3858 " \t{$src, $dst|$dst, $src}"),
3860 def rb : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
3861 !strconcat(OpcodeStr,
3862 " \t{{sae}, $src, $dst|$dst, $src, {sae}}"),
3864 def m : AVX5128I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
3865 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
3869 defm VRSQRT28PSZ : avx512_fp28_p<0xCC, "vrsqrt28ps", VR512, f512mem>,
3870 EVEX_V512, EVEX_CD8<32, CD8VF>;
3871 defm VRSQRT28PDZ : avx512_fp28_p<0xCC, "vrsqrt28pd", VR512, f512mem>,
3872 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
3873 defm VRCP28PSZ : avx512_fp28_p<0xCA, "vrcp28ps", VR512, f512mem>,
3874 EVEX_V512, EVEX_CD8<32, CD8VF>;
3875 defm VRCP28PDZ : avx512_fp28_p<0xCA, "vrcp28pd", VR512, f512mem>,
3876 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
// Unmasked packed intrinsics with FROUND_NO_EXC select the {sae} (rb) forms.
3878 def : Pat <(v16f32 (int_x86_avx512_rsqrt28_ps (v16f32 VR512:$src),
3879 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), FROUND_NO_EXC)),
3880 (VRSQRT28PSZrb VR512:$src)>;
3881 def : Pat <(v8f64 (int_x86_avx512_rsqrt28_pd (v8f64 VR512:$src),
3882 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1), FROUND_NO_EXC)),
3883 (VRSQRT28PDZrb VR512:$src)>;
3885 def : Pat <(v16f32 (int_x86_avx512_rcp28_ps (v16f32 VR512:$src),
3886 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), FROUND_NO_EXC)),
3887 (VRCP28PSZrb VR512:$src)>;
3888 def : Pat <(v8f64 (int_x86_avx512_rcp28_pd (v8f64 VR512:$src),
3889 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1), FROUND_NO_EXC)),
3890 (VRCP28PDZrb VR512:$src)>;
// 512-bit packed sqrt: PS and PD register/memory forms, selected via OpNode
// (fsqrt in the VSQRT instantiation below).
// NOTE(review): several encoding/trailer lines (original 3897-3899, 3902,
// 3909-3911, 3917-3918) are missing from this listing.
3892 multiclass avx512_sqrt_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
3893 OpndItins itins_s, OpndItins itins_d> {
3894 def PSZrr :AVX512PSI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
3895 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3896 [(set VR512:$dst, (v16f32 (OpNode VR512:$src)))], itins_s.rr>,
3900 def PSZrm : AVX512PSI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
3901 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3903 (OpNode (v16f32 (bitconvert (memopv16f32 addr:$src)))))],
3904 itins_s.rm>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
3906 def PDZrr : AVX512PDI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
3907 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3908 [(set VR512:$dst, (v8f64 (OpNode VR512:$src)))], itins_d.rr>,
3912 def PDZrm : AVX512PDI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
3913 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3914 [(set VR512:$dst, (OpNode
// NOTE(review): the PD memory pattern bitconverts a memopv16f32 load to
// v8f64 — presumably intentional (same 512-bit load), but memopv8f64 would
// match the PS form's style; confirm against upstream before changing.
3915 (v8f64 (bitconvert (memopv16f32 addr:$src)))))],
3916 itins_d.rm>, EVEX, EVEX_V512, EVEX_CD8<64, CD8VF>;
// Scalar sqrt (ss/sd): plain FR32X/FR64X forms with no patterns (selected by
// the fsqrt Pat<> defs below) plus isCodeGenOnly *_Int intrinsic forms on
// VR128X. Register and memory variants for each.
// NOTE(review): a number of interior lines (original 3933, 3947, 3950, 3955,
// 3961, 3975, 3978-3981) are missing from this listing.
3920 multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
3921 Intrinsic F32Int, Intrinsic F64Int,
3922 OpndItins itins_s, OpndItins itins_d> {
3923 def SSZr : SI<opc, MRMSrcReg, (outs FR32X:$dst),
3924 (ins FR32X:$src1, FR32X:$src2),
3925 !strconcat(OpcodeStr,
3926 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3927 [], itins_s.rr>, XS, EVEX_4V;
3928 let isCodeGenOnly = 1 in
3929 def SSZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
3930 (ins VR128X:$src1, VR128X:$src2),
3931 !strconcat(OpcodeStr,
3932 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3934 (F32Int VR128X:$src1, VR128X:$src2))],
3935 itins_s.rr>, XS, EVEX_4V;
3936 let mayLoad = 1 in {
3937 def SSZm : SI<opc, MRMSrcMem, (outs FR32X:$dst),
3938 (ins FR32X:$src1, f32mem:$src2),
3939 !strconcat(OpcodeStr,
3940 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3941 [], itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
3942 let isCodeGenOnly = 1 in
3943 def SSZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
3944 (ins VR128X:$src1, ssmem:$src2),
3945 !strconcat(OpcodeStr,
3946 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3948 (F32Int VR128X:$src1, sse_load_f32:$src2))],
3949 itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
3951 def SDZr : SI<opc, MRMSrcReg, (outs FR64X:$dst),
3952 (ins FR64X:$src1, FR64X:$src2),
3953 !strconcat(OpcodeStr,
3954 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
3956 let isCodeGenOnly = 1 in
3957 def SDZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
3958 (ins VR128X:$src1, VR128X:$src2),
3959 !strconcat(OpcodeStr,
3960 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3962 (F64Int VR128X:$src1, VR128X:$src2))],
3963 itins_s.rr>, XD, EVEX_4V, VEX_W;
3964 let mayLoad = 1 in {
3965 def SDZm : SI<opc, MRMSrcMem, (outs FR64X:$dst),
3966 (ins FR64X:$src1, f64mem:$src2),
3967 !strconcat(OpcodeStr,
3968 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
3969 XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
3970 let isCodeGenOnly = 1 in
3971 def SDZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
3972 (ins VR128X:$src1, sdmem:$src2),
3973 !strconcat(OpcodeStr,
3974 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3976 (F64Int VR128X:$src1, sse_load_f64:$src2))]>,
3977 XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
// VSQRT* = scalar forms (vsqrtss/vsqrtsd) + packed forms (vsqrtps/vsqrtpd).
3982 defm VSQRT : avx512_sqrt_scalar<0x51, "sqrt",
3983 int_x86_avx512_sqrt_ss, int_x86_avx512_sqrt_sd,
3984 SSE_SQRTSS, SSE_SQRTSD>,
3985 avx512_sqrt_packed<0x51, "vsqrt", fsqrt,
3986 SSE_SQRTPS, SSE_SQRTPD>;
// ISel patterns mapping fsqrt / X86frsqrt / X86frcp and the SSE scalar
// intrinsics onto the AVX-512 instructions defined above. The memory-form
// scalar patterns are OptForSize-only (the reg form plus a separate load is
// preferred otherwise, matching the SSE/AVX convention in this file).
3988 let Predicates = [HasAVX512] in {
3989 def : Pat<(v16f32 (int_x86_avx512_sqrt_ps_512 (v16f32 VR512:$src1),
3990 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), FROUND_CURRENT)),
3991 (VSQRTPSZrr VR512:$src1)>;
3992 def : Pat<(v8f64 (int_x86_avx512_sqrt_pd_512 (v8f64 VR512:$src1),
3993 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1), FROUND_CURRENT)),
3994 (VSQRTPDZrr VR512:$src1)>;
3996 def : Pat<(f32 (fsqrt FR32X:$src)),
3997 (VSQRTSSZr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
3998 def : Pat<(f32 (fsqrt (load addr:$src))),
3999 (VSQRTSSZm (f32 (IMPLICIT_DEF)), addr:$src)>,
4000 Requires<[OptForSize]>;
4001 def : Pat<(f64 (fsqrt FR64X:$src)),
4002 (VSQRTSDZr (f64 (IMPLICIT_DEF)), FR64X:$src)>;
4003 def : Pat<(f64 (fsqrt (load addr:$src))),
4004 (VSQRTSDZm (f64 (IMPLICIT_DEF)), addr:$src)>,
4005 Requires<[OptForSize]>;
4007 def : Pat<(f32 (X86frsqrt FR32X:$src)),
4008 (VRSQRT14SSrr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
4009 def : Pat<(f32 (X86frsqrt (load addr:$src))),
4010 (VRSQRT14SSrm (f32 (IMPLICIT_DEF)), addr:$src)>,
4011 Requires<[OptForSize]>;
4013 def : Pat<(f32 (X86frcp FR32X:$src)),
4014 (VRCP14SSrr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
4015 def : Pat<(f32 (X86frcp (load addr:$src))),
4016 (VRCP14SSrm (f32 (IMPLICIT_DEF)), addr:$src)>,
4017 Requires<[OptForSize]>;
// NOTE(review): these COPY_TO_REGCLASS targets use FR32/FR64 rather than the
// AVX-512 FR32X/FR64X classes used elsewhere in this file — likely works
// because FR32/FR64 are subclasses, but confirm this is intentional.
4019 def : Pat<(int_x86_sse_sqrt_ss VR128X:$src),
4020 (COPY_TO_REGCLASS (VSQRTSSZr (f32 (IMPLICIT_DEF)),
4021 (COPY_TO_REGCLASS VR128X:$src, FR32)),
4023 def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
4024 (VSQRTSSZm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
4026 def : Pat<(int_x86_sse2_sqrt_sd VR128X:$src),
4027 (COPY_TO_REGCLASS (VSQRTSDZr (f64 (IMPLICIT_DEF)),
4028 (COPY_TO_REGCLASS VR128X:$src, FR64)),
4030 def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
4031 (VSQRTSDZm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
// Packed rounding-style unary ops with an immediate control byte ($src2),
// instantiated per-domain (PS under SSEPackedSingle, PD under
// SSEPackedDouble), each with reg and mem forms selected via the
// V4F32Int/V2F64Int intrinsics.
// NOTE(review): interior lines (original 4039, 4048, 4054, 4066, 4072,
// 4076) are missing from this listing.
4035 multiclass avx512_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
4036 X86MemOperand x86memop, RegisterClass RC,
4037 PatFrag mem_frag32, PatFrag mem_frag64,
4038 Intrinsic V4F32Int, Intrinsic V2F64Int,
4040 let ExeDomain = SSEPackedSingle in {
4041 // Intrinsic operation, reg.
4042 // Vector intrinsic operation, reg
4043 def PSr : AVX512AIi8<opcps, MRMSrcReg,
4044 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4045 !strconcat(OpcodeStr,
4046 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4047 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>;
4049 // Vector intrinsic operation, mem
4050 def PSm : AVX512AIi8<opcps, MRMSrcMem,
4051 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4052 !strconcat(OpcodeStr,
4053 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4055 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
4056 EVEX_CD8<32, VForm>;
4057 } // ExeDomain = SSEPackedSingle
4059 let ExeDomain = SSEPackedDouble in {
4060 // Vector intrinsic operation, reg
4061 def PDr : AVX512AIi8<opcpd, MRMSrcReg,
4062 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4063 !strconcat(OpcodeStr,
4064 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4065 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>;
4067 // Vector intrinsic operation, mem
4068 def PDm : AVX512AIi8<opcpd, MRMSrcMem,
4069 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4070 !strconcat(OpcodeStr,
4071 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4073 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
4074 EVEX_CD8<64, VForm>;
4075 } // ExeDomain = SSEPackedDouble
// Scalar two-source ops with an immediate control byte ($src3): a plain
// FR32X/FR64X form with no pattern (hasSideEffects = 0), an isCodeGenOnly
// intrinsic reg form, and an intrinsic mem form — for both ss and sd.
// NOTE(review): interior lines (original 4079-4081, 4083, 4089-4090, 4098,
// 4107-4108, 4114-4115, 4123-4124, 4130, 4134) are missing from this listing.
4078 multiclass avx512_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4082 let ExeDomain = GenericDomain in {
4084 let hasSideEffects = 0 in
4085 def SSr : AVX512AIi8<opcss, MRMSrcReg,
4086 (outs FR32X:$dst), (ins FR32X:$src1, FR32X:$src2, i32i8imm:$src3),
4087 !strconcat(OpcodeStr,
4088 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4091 // Intrinsic operation, reg.
4092 let isCodeGenOnly = 1 in
4093 def SSr_Int : AVX512AIi8<opcss, MRMSrcReg,
4094 (outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
4095 !strconcat(OpcodeStr,
4096 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4097 [(set VR128X:$dst, (F32Int VR128X:$src1, VR128X:$src2, imm:$src3))]>;
4099 // Intrinsic operation, mem.
4100 def SSm : AVX512AIi8<opcss, MRMSrcMem, (outs VR128X:$dst),
4101 (ins VR128X:$src1, ssmem:$src2, i32i8imm:$src3),
4102 !strconcat(OpcodeStr,
4103 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4104 [(set VR128X:$dst, (F32Int VR128X:$src1,
4105 sse_load_f32:$src2, imm:$src3))]>,
4106 EVEX_CD8<32, CD8VT1>;
4109 let hasSideEffects = 0 in
4110 def SDr : AVX512AIi8<opcsd, MRMSrcReg,
4111 (outs FR64X:$dst), (ins FR64X:$src1, FR64X:$src2, i32i8imm:$src3),
4112 !strconcat(OpcodeStr,
4113 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4116 // Intrinsic operation, reg.
4117 let isCodeGenOnly = 1 in
4118 def SDr_Int : AVX512AIi8<opcsd, MRMSrcReg,
4119 (outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
4120 !strconcat(OpcodeStr,
4121 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4122 [(set VR128X:$dst, (F64Int VR128X:$src1, VR128X:$src2, imm:$src3))]>,
4125 // Intrinsic operation, mem.
4126 def SDm : AVX512AIi8<opcsd, MRMSrcMem,
4127 (outs VR128X:$dst), (ins VR128X:$src1, sdmem:$src2, i32i8imm:$src3),
4128 !strconcat(OpcodeStr,
4129 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4131 (F64Int VR128X:$src1, sse_load_f64:$src2, imm:$src3))]>,
4132 VEX_W, EVEX_CD8<64, CD8VT1>;
4133 } // ExeDomain = GenericDomain
// Packed vrndscaleps/pd: one unary source plus an immediate rounding-control
// byte ($src2). Patterns (original 4146-4147, 4153-4156) are missing from
// this listing along with the multiclass trailer.
4136 multiclass avx512_rndscale<bits<8> opc, string OpcodeStr,
4137 X86MemOperand x86memop, RegisterClass RC,
4138 PatFrag mem_frag, Domain d> {
4139 let ExeDomain = d in {
4140 // Intrinsic operation, reg.
4141 // Vector intrinsic operation, reg
4142 def r : AVX512AIi8<opc, MRMSrcReg,
4143 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4144 !strconcat(OpcodeStr,
4145 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4148 // Vector intrinsic operation, mem
4149 def m : AVX512AIi8<opc, MRMSrcMem,
4150 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4151 !strconcat(OpcodeStr,
4152 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4158 defm VRNDSCALEPSZ : avx512_rndscale<0x08, "vrndscaleps", f512mem, VR512,
4159 memopv16f32, SSEPackedSingle>, EVEX_V512,
4160 EVEX_CD8<32, CD8VF>;
// Pass-through-merge intrinsic forms (mask all-ones, passthru == source)
// map to the plain reg def. NOTE(review): each pattern's final operand line
// (original 4164 / 4174) is missing from this listing.
4162 def : Pat<(v16f32 (int_x86_avx512_mask_rndscale_ps_512 (v16f32 VR512:$src1),
4163 imm:$src2, (v16f32 VR512:$src1), (i16 -1),
4165 (VRNDSCALEPSZr VR512:$src1, imm:$src2)>;
4168 defm VRNDSCALEPDZ : avx512_rndscale<0x09, "vrndscalepd", f512mem, VR512,
4169 memopv8f64, SSEPackedDouble>, EVEX_V512,
4170 VEX_W, EVEX_CD8<64, CD8VF>;
4172 def : Pat<(v8f64 (int_x86_avx512_mask_rndscale_pd_512 (v8f64 VR512:$src1),
4173 imm:$src2, (v8f64 VR512:$src1), (i8 -1),
4175 (VRNDSCALEPDZr VR512:$src1, imm:$src2)>;
// Scalar vrndscaless/sd: two register sources plus the immediate control
// byte. NOTE(review): pattern lines and trailer (original 4184-4185,
// 4190-4192) are missing from this listing.
4177 multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr,
4178 Operand x86memop, RegisterClass RC, Domain d> {
4179 let ExeDomain = d in {
4180 def r : AVX512AIi8<opc, MRMSrcReg,
4181 (outs RC:$dst), (ins RC:$src1, RC:$src2, i32i8imm:$src3),
4182 !strconcat(OpcodeStr,
4183 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4186 def m : AVX512AIi8<opc, MRMSrcMem,
4187 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i32i8imm:$src3),
4188 !strconcat(OpcodeStr,
4189 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4194 defm VRNDSCALESS : avx512_rndscale_scalar<0x0A, "vrndscaless", ssmem, FR32X,
4195 SSEPackedSingle>, EVEX_CD8<32, CD8VT1>;
4197 defm VRNDSCALESD : avx512_rndscale_scalar<0x0B, "vrndscalesd", sdmem, FR64X,
4198 SSEPackedDouble>, EVEX_CD8<64, CD8VT1>;
// Map generic rounding nodes onto vrndscale with the matching immediate:
// 0x1 = floor, 0x2 = ceil, 0x3 = trunc, 0x4 = rint (current mode),
// 0xC = nearbyint (current mode, suppress precision exception).
// NOTE(review): the first pattern lacks the explicit (f32 ...) wrapper its
// siblings use — harmless since FR32X pins the type, but inconsistent.
4200 def : Pat<(ffloor FR32X:$src),
4201 (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x1))>;
4202 def : Pat<(f64 (ffloor FR64X:$src)),
4203 (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x1))>;
4204 def : Pat<(f32 (fnearbyint FR32X:$src)),
4205 (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0xC))>;
4206 def : Pat<(f64 (fnearbyint FR64X:$src)),
4207 (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0xC))>;
4208 def : Pat<(f32 (fceil FR32X:$src)),
4209 (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x2))>;
4210 def : Pat<(f64 (fceil FR64X:$src)),
4211 (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x2))>;
4212 def : Pat<(f32 (frint FR32X:$src)),
4213 (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x4))>;
4214 def : Pat<(f64 (frint FR64X:$src)),
4215 (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x4))>;
4216 def : Pat<(f32 (ftrunc FR32X:$src)),
4217 (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x3))>;
4218 def : Pat<(f64 (ftrunc FR64X:$src)),
4219 (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x3))>;
// Packed equivalents on the 512-bit vrndscaleps/pd.
4221 def : Pat<(v16f32 (ffloor VR512:$src)),
4222 (VRNDSCALEPSZr VR512:$src, (i32 0x1))>;
4223 def : Pat<(v16f32 (fnearbyint VR512:$src)),
4224 (VRNDSCALEPSZr VR512:$src, (i32 0xC))>;
4225 def : Pat<(v16f32 (fceil VR512:$src)),
4226 (VRNDSCALEPSZr VR512:$src, (i32 0x2))>;
4227 def : Pat<(v16f32 (frint VR512:$src)),
4228 (VRNDSCALEPSZr VR512:$src, (i32 0x4))>;
4229 def : Pat<(v16f32 (ftrunc VR512:$src)),
4230 (VRNDSCALEPSZr VR512:$src, (i32 0x3))>;
4232 def : Pat<(v8f64 (ffloor VR512:$src)),
4233 (VRNDSCALEPDZr VR512:$src, (i32 0x1))>;
4234 def : Pat<(v8f64 (fnearbyint VR512:$src)),
4235 (VRNDSCALEPDZr VR512:$src, (i32 0xC))>;
4236 def : Pat<(v8f64 (fceil VR512:$src)),
4237 (VRNDSCALEPDZr VR512:$src, (i32 0x2))>;
4238 def : Pat<(v8f64 (frint VR512:$src)),
4239 (VRNDSCALEPDZr VR512:$src, (i32 0x4))>;
4240 def : Pat<(v8f64 (ftrunc VR512:$src)),
4241 (VRNDSCALEPDZr VR512:$src, (i32 0x3))>;
4243 //-------------------------------------------------
4244 // Integer truncate and extend operations
4245 //-------------------------------------------------
// Down-converting VPMOV* family: register forms (plain / merge-masked rrk /
// zero-masked rrkz) and store forms (mr / masked mrk). Note these use
// MRMDestReg/MRMDestMem: the wide source is the ModRM reg field.
// NOTE(review): all pattern lines and the trailer (original 4251, 4253-4254,
// 4259-4260, 4265-4266, 4269-4270, 4274-4276) are missing from this listing.
4247 multiclass avx512_trunc_sat<bits<8> opc, string OpcodeStr,
4248 RegisterClass dstRC, RegisterClass srcRC,
4249 RegisterClass KRC, X86MemOperand x86memop> {
4250 def rr : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
4252 !strconcat(OpcodeStr," \t{$src, $dst|$dst, $src}"),
4255 def rrk : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
4256 (ins KRC:$mask, srcRC:$src),
4257 !strconcat(OpcodeStr,
4258 " \t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
4261 def rrkz : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
4262 (ins KRC:$mask, srcRC:$src),
4263 !strconcat(OpcodeStr,
4264 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
4267 def mr : AVX512XS8I<opc, MRMDestMem, (outs), (ins x86memop:$dst, srcRC:$src),
4268 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
4271 def mrk : AVX512XS8I<opc, MRMDestMem, (outs),
4272 (ins x86memop:$dst, KRC:$mask, srcRC:$src),
4273 !strconcat(OpcodeStr, " \t{$src, $dst {${mask}}|${dst} {${mask}}, $src}"),
// Truncations from q (i64) and d (i32) elements, in plain / signed-saturate
// (S) / unsigned-saturate (US) flavors. CD8 tuple encodes the memory form's
// element size and fraction of the vector (VO = 1/8, VQ = 1/4, VH = 1/2).
4277 defm VPMOVQB : avx512_trunc_sat<0x32, "vpmovqb", VR128X, VR512, VK8WM,
4278 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
4279 defm VPMOVSQB : avx512_trunc_sat<0x22, "vpmovsqb", VR128X, VR512, VK8WM,
4280 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
4281 defm VPMOVUSQB : avx512_trunc_sat<0x12, "vpmovusqb", VR128X, VR512, VK8WM,
4282 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
4283 defm VPMOVQW : avx512_trunc_sat<0x34, "vpmovqw", VR128X, VR512, VK8WM,
4284 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
4285 defm VPMOVSQW : avx512_trunc_sat<0x24, "vpmovsqw", VR128X, VR512, VK8WM,
4286 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
4287 defm VPMOVUSQW : avx512_trunc_sat<0x14, "vpmovusqw", VR128X, VR512, VK8WM,
4288 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
4289 defm VPMOVQD : avx512_trunc_sat<0x35, "vpmovqd", VR256X, VR512, VK8WM,
4290 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
4291 defm VPMOVSQD : avx512_trunc_sat<0x25, "vpmovsqd", VR256X, VR512, VK8WM,
4292 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
4293 defm VPMOVUSQD : avx512_trunc_sat<0x15, "vpmovusqd", VR256X, VR512, VK8WM,
4294 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
4295 defm VPMOVDW : avx512_trunc_sat<0x33, "vpmovdw", VR256X, VR512, VK16WM,
4296 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
4297 defm VPMOVSDW : avx512_trunc_sat<0x23, "vpmovsdw", VR256X, VR512, VK16WM,
4298 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
4299 defm VPMOVUSDW : avx512_trunc_sat<0x13, "vpmovusdw", VR256X, VR512, VK16WM,
4300 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
4301 defm VPMOVDB : avx512_trunc_sat<0x31, "vpmovdb", VR128X, VR512, VK16WM,
4302 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
4303 defm VPMOVSDB : avx512_trunc_sat<0x21, "vpmovsdb", VR128X, VR512, VK16WM,
4304 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
4305 defm VPMOVUSDB : avx512_trunc_sat<0x11, "vpmovusdb", VR128X, VR512, VK16WM,
4306 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
// Generic truncation nodes -> plain VPMOV* (non-saturating) forms; masked
// truncation nodes -> zero-masked rrkz forms.
4308 def : Pat<(v16i8 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQBrr VR512:$src)>;
4309 def : Pat<(v8i16 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQWrr VR512:$src)>;
4310 def : Pat<(v16i16 (X86vtrunc (v16i32 VR512:$src))), (VPMOVDWrr VR512:$src)>;
4311 def : Pat<(v16i8 (X86vtrunc (v16i32 VR512:$src))), (VPMOVDBrr VR512:$src)>;
4312 def : Pat<(v8i32 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQDrr VR512:$src)>;
4314 def : Pat<(v16i8 (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
4315 (VPMOVDBrrkz VK16WM:$mask, VR512:$src)>;
4316 def : Pat<(v16i16 (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
4317 (VPMOVDWrrkz VK16WM:$mask, VR512:$src)>;
4318 def : Pat<(v8i16 (X86vtruncm VK8WM:$mask, (v8i64 VR512:$src))),
4319 (VPMOVQWrrkz VK8WM:$mask, VR512:$src)>;
4320 def : Pat<(v8i32 (X86vtruncm VK8WM:$mask, (v8i64 VR512:$src))),
4321 (VPMOVQDrrkz VK8WM:$mask, VR512:$src)>;
// Widening move family (vpmovzx*/vpmovsx*): plain, merge-masked (rrk) and
// zero-masked (rrkz) register forms plus the matching memory forms.
// OpNode (X86vzext/X86vsext) drives selection of the unmasked forms.
// NOTE(review): the masked forms' pattern lines and the trailer (original
// 4328, 4330, 4333, 4337-4338, 4342-4343, 4348, 4350-4351, 4355-4357,
// 4361-4364) are missing from this listing.
4324 multiclass avx512_extend<bits<8> opc, string OpcodeStr, RegisterClass KRC,
4325 RegisterClass DstRC, RegisterClass SrcRC, SDNode OpNode,
4326 PatFrag mem_frag, X86MemOperand x86memop,
4327 ValueType OpVT, ValueType InVT> {
4329 def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
4331 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
4332 [(set DstRC:$dst, (OpVT (OpNode (InVT SrcRC:$src))))]>, EVEX;
4334 def rrk : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
4335 (ins KRC:$mask, SrcRC:$src),
4336 !strconcat(OpcodeStr, " \t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
4339 def rrkz : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
4340 (ins KRC:$mask, SrcRC:$src),
4341 !strconcat(OpcodeStr, " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
4344 let mayLoad = 1 in {
4345 def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
4346 (ins x86memop:$src),
4347 !strconcat(OpcodeStr," \t{$src, $dst|$dst, $src}"),
4349 (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))]>,
4352 def rmk : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
4353 (ins KRC:$mask, x86memop:$src),
4354 !strconcat(OpcodeStr," \t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
4358 def rmkz : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
4359 (ins KRC:$mask, x86memop:$src),
4360 !strconcat(OpcodeStr," \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
// 512-bit instantiations. NOTE(review): the EVEX_CD8 continuation lines for
// the BD/BQ variants (original 4368, 4371, 4384, 4387) are missing here.
4366 defm VPMOVZXBDZ: avx512_extend<0x31, "vpmovzxbd", VK16WM, VR512, VR128X, X86vzext,
4367 memopv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
4369 defm VPMOVZXBQZ: avx512_extend<0x32, "vpmovzxbq", VK8WM, VR512, VR128X, X86vzext,
4370 memopv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
4372 defm VPMOVZXWDZ: avx512_extend<0x33, "vpmovzxwd", VK16WM, VR512, VR256X, X86vzext,
4373 memopv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
4374 EVEX_CD8<16, CD8VH>;
4375 defm VPMOVZXWQZ: avx512_extend<0x34, "vpmovzxwq", VK8WM, VR512, VR128X, X86vzext,
4376 memopv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
4377 EVEX_CD8<16, CD8VQ>;
4378 defm VPMOVZXDQZ: avx512_extend<0x35, "vpmovzxdq", VK8WM, VR512, VR256X, X86vzext,
4379 memopv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
4380 EVEX_CD8<32, CD8VH>;
4382 defm VPMOVSXBDZ: avx512_extend<0x21, "vpmovsxbd", VK16WM, VR512, VR128X, X86vsext,
4383 memopv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
4385 defm VPMOVSXBQZ: avx512_extend<0x22, "vpmovsxbq", VK8WM, VR512, VR128X, X86vsext,
4386 memopv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
4388 defm VPMOVSXWDZ: avx512_extend<0x23, "vpmovsxwd", VK16WM, VR512, VR256X, X86vsext,
4389 memopv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
4390 EVEX_CD8<16, CD8VH>;
4391 defm VPMOVSXWQZ: avx512_extend<0x24, "vpmovsxwq", VK8WM, VR512, VR128X, X86vsext,
4392 memopv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
4393 EVEX_CD8<16, CD8VQ>;
4394 defm VPMOVSXDQZ: avx512_extend<0x25, "vpmovsxdq", VK8WM, VR512, VR256X, X86vsext,
4395 memopv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
4396 EVEX_CD8<32, CD8VH>;
4398 //===----------------------------------------------------------------------===//
4399 // GATHER - SCATTER Operations
// Gather: reads via a vector-index memory operand; the mask register is both
// read and written (the $mask = $mask_wb tie), the destination is an
// @earlyclobber merge with $src1.
// NOTE(review): the opening 'let mayLoad...' line (original 4403) and the
// trailer (4409-4411) are missing from this listing.
4401 multiclass avx512_gather<bits<8> opc, string OpcodeStr, RegisterClass KRC,
4402 RegisterClass RC, X86MemOperand memop> {
4404 Constraints = "@earlyclobber $dst, $src1 = $dst, $mask = $mask_wb" in
4405 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst, KRC:$mask_wb),
4406 (ins RC:$src1, KRC:$mask, memop:$src2),
4407 !strconcat(OpcodeStr,
4408 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
// FP gathers, grouped by execution domain. qps variants gather 8 x f32 into
// a YMM (VR256X) destination since only 8 64-bit indices fit in a ZMM.
// NOTE(review): closing braces of these let-blocks (original 4417, 4424-4425)
// are missing from this listing.
4412 let ExeDomain = SSEPackedDouble in {
4413 defm VGATHERDPDZ : avx512_gather<0x92, "vgatherdpd", VK8WM, VR512, vy64xmem>,
4414 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4415 defm VGATHERQPDZ : avx512_gather<0x93, "vgatherqpd", VK8WM, VR512, vz64mem>,
4416 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4419 let ExeDomain = SSEPackedSingle in {
4420 defm VGATHERDPSZ : avx512_gather<0x92, "vgatherdps", VK16WM, VR512, vz32mem>,
4421 EVEX_V512, EVEX_CD8<32, CD8VT1>;
4422 defm VGATHERQPSZ : avx512_gather<0x93, "vgatherqps", VK8WM, VR256X, vz64mem>,
4423 EVEX_V512, EVEX_CD8<32, CD8VT1>;
// Integer gathers.
4426 defm VPGATHERDQZ : avx512_gather<0x90, "vpgatherdq", VK8WM, VR512, vy64xmem>,
4427 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4428 defm VPGATHERDDZ : avx512_gather<0x90, "vpgatherdd", VK16WM, VR512, vz32mem>,
4429 EVEX_V512, EVEX_CD8<32, CD8VT1>;
4431 defm VPGATHERQQZ : avx512_gather<0x91, "vpgatherqq", VK8WM, VR512, vz64mem>,
4432 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4433 defm VPGATHERQDZ : avx512_gather<0x91, "vpgatherqd", VK8WM, VR256X, vz64mem>,
4434 EVEX_V512, EVEX_CD8<32, CD8VT1>;
// Scatter: store form (MRMDestMem); only the mask is written back.
// NOTE(review): the trailer (original 4443-4445) is missing from this listing.
4436 multiclass avx512_scatter<bits<8> opc, string OpcodeStr, RegisterClass KRC,
4437 RegisterClass RC, X86MemOperand memop> {
4438 let mayStore = 1, Constraints = "$mask = $mask_wb" in
4439 def mr : AVX5128I<opc, MRMDestMem, (outs KRC:$mask_wb),
4440 (ins memop:$dst, KRC:$mask, RC:$src2),
4441 !strconcat(OpcodeStr,
4442 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
// FP and integer scatters, mirroring the gather instantiations above.
// NOTE(review): let-block closing braces (original 4451, 4458) are missing.
4446 let ExeDomain = SSEPackedDouble in {
4447 defm VSCATTERDPDZ : avx512_scatter<0xA2, "vscatterdpd", VK8WM, VR512, vy64xmem>,
4448 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4449 defm VSCATTERQPDZ : avx512_scatter<0xA3, "vscatterqpd", VK8WM, VR512, vz64mem>,
4450 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4453 let ExeDomain = SSEPackedSingle in {
4454 defm VSCATTERDPSZ : avx512_scatter<0xA2, "vscatterdps", VK16WM, VR512, vz32mem>,
4455 EVEX_V512, EVEX_CD8<32, CD8VT1>;
4456 defm VSCATTERQPSZ : avx512_scatter<0xA3, "vscatterqps", VK8WM, VR256X, vz64mem>,
4457 EVEX_V512, EVEX_CD8<32, CD8VT1>;
4460 defm VPSCATTERDQZ : avx512_scatter<0xA0, "vpscatterdq", VK8WM, VR512, vy64xmem>,
4461 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4462 defm VPSCATTERDDZ : avx512_scatter<0xA0, "vpscatterdd", VK16WM, VR512, vz32mem>,
4463 EVEX_V512, EVEX_CD8<32, CD8VT1>;
4465 defm VPSCATTERQQZ : avx512_scatter<0xA1, "vpscatterqq", VK8WM, VR512, vz64mem>,
4466 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4467 defm VPSCATTERQDZ : avx512_scatter<0xA1, "vpscatterqd", VK8WM, VR256X, vz64mem>,
4468 EVEX_V512, EVEX_CD8<32, CD8VT1>;
// Gather/scatter prefetch (PREFETCHWT-family hints, AVX-512 PF extension).
// No outputs: the instruction only produces cache side effects, hence
// hasSideEffects = 1 and the [HasPFI] predicate.
4471 multiclass avx512_gather_scatter_prefetch<bits<8> opc, Format F, string OpcodeStr,
4472 RegisterClass KRC, X86MemOperand memop> {
4473 let Predicates = [HasPFI], hasSideEffects = 1 in
4474 def m : AVX5128I<opc, F, (outs), (ins KRC:$mask, memop:$src),
4475 !strconcat(OpcodeStr, " \t{$src {${mask}}|{${mask}}, $src}"),
// NOTE(review): the '[]>, EVEX, EVEX_K;' tail and closing '}' appear to be
// missing from this chunk — confirm against the full file.
// Prefetch instantiations. The ModRM reg-field extension selects the
// variant: MRM1m = gather hint 0, MRM2m = gather hint 1, MRM5m = scatter
// hint 0, MRM6m = scatter hint 1; opcode 0xC6 is the dword-index form and
// 0xC7 the qword-index form.
4479 defm VGATHERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dps",
4480 VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
4482 defm VGATHERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qps",
4483 VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
4485 defm VGATHERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dpd",
4486 VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
4488 defm VGATHERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qpd",
4489 VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4491 defm VGATHERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dps",
4492 VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
4494 defm VGATHERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qps",
4495 VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
4497 defm VGATHERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dpd",
4498 VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
4500 defm VGATHERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qpd",
4501 VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4503 defm VSCATTERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dps",
4504 VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
4506 defm VSCATTERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qps",
4507 VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
4509 defm VSCATTERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dpd",
4510 VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
4512 defm VSCATTERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qpd",
4513 VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4515 defm VSCATTERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dps",
4516 VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
4518 defm VSCATTERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qps",
4519 VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
4521 defm VSCATTERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dpd",
4522 VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
4524 defm VSCATTERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qpd",
4525 VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4526 //===----------------------------------------------------------------------===//
4527 // VSHUFPS - VSHUFPD Operations
//===----------------------------------------------------------------------===//
// VSHUFPS/VSHUFPD: immediate-controlled two-source shuffle lowered from the
// X86Shufp DAG node. 'rmi' is the reg/mem form, 'rri' the reg/reg form; the
// 8-bit immediate $src3 is the shuffle selector.
// NOTE(review): the parameter list looks truncated in this chunk — the
// 'Domain d' parameter used in both instruction bodies (and the multiclass's
// closing '}') are not visible; confirm against the full file.
4529 multiclass avx512_shufp<RegisterClass RC, X86MemOperand x86memop,
4530 ValueType vt, string OpcodeStr, PatFrag mem_frag,
4532 def rmi : AVX512PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
4533 (ins RC:$src1, x86memop:$src2, i8imm:$src3),
4534 !strconcat(OpcodeStr,
4535 " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4536 [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
4537 (i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
4538 EVEX_4V, Sched<[WriteShuffleLd, ReadAfterLd]>;
4539 def rri : AVX512PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
4540 (ins RC:$src1, RC:$src2, i8imm:$src3),
4541 !strconcat(OpcodeStr,
4542 " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4543 [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
4544 (i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
4545 EVEX_4V, Sched<[WriteShuffle]>;
// 512-bit VSHUFPS/VSHUFPD instantiations.
4548 defm VSHUFPSZ  : avx512_shufp<VR512, f512mem, v16f32, "vshufps", memopv16f32,
4549 SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
4550 defm VSHUFPDZ  : avx512_shufp<VR512, f512mem, v8f64, "vshufpd", memopv8f64,
4551 SSEPackedDouble>, PD, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
// Reuse the FP shuffle instructions for the equivalent integer-typed
// X86Shufp nodes (v16i32 via VSHUFPS, v8i64 via VSHUFPD).
4553 def : Pat<(v16i32 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))),
4554 (VSHUFPSZrri VR512:$src1, VR512:$src2, imm:$imm)>;
4555 def : Pat<(v16i32 (X86Shufp VR512:$src1,
4556 (memopv16i32 addr:$src2), (i8 imm:$imm))),
4557 (VSHUFPSZrmi VR512:$src1, addr:$src2, imm:$imm)>;
4559 def : Pat<(v8i64 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))),
4560 (VSHUFPDZrri VR512:$src1, VR512:$src2, imm:$imm)>;
4561 def : Pat<(v8i64 (X86Shufp VR512:$src1,
4562 (memopv8i64 addr:$src2), (i8 imm:$imm))),
4563 (VSHUFPDZrmi VR512:$src1, addr:$src2, imm:$imm)>;
// VALIGND/VALIGNQ: concatenate-and-shift-right by element count. The reg/reg
// form goes through AVX512_masking so the masked/zero-masked variants are
// generated too. Note the X86VAlign pattern lists the operands as
// ($src2, $src1) — swapped relative to the instruction's ins order — and the
// float-typed Pat below performs the same swap.
// NOTE(review): several interior lines of this multiclass (the OpcodeStr
// argument, the immediate operand of the pattern, parts of the rmi tail and
// the closing '}') appear to be missing from this chunk; confirm against the
// full file before editing.
4565 multiclass avx512_valign<string Suffix, RegisterClass RC, RegisterClass KRC,
4566 RegisterClass MRC, X86MemOperand x86memop,
4567 ValueType IntVT, ValueType FloatVT> {
4568 defm rri : AVX512_masking<0x03, MRMSrcReg, (outs RC:$dst),
4569 (ins RC:$src1, RC:$src2, i8imm:$src3),
4571 "$src3, $src2, $src1", "$src1, $src2, $src3",
4572 (IntVT (X86VAlign RC:$src2, RC:$src1,
4575 AVX512AIi8Base, EVEX_4V;
4577 // Also match valign of packed floats.
4578 def : Pat<(FloatVT (X86VAlign RC:$src1, RC:$src2, (i8 imm:$imm))),
4579 (!cast<Instruction>(NAME##rri) RC:$src2, RC:$src1, imm:$imm)>;
4582 def rmi : AVX512AIi8<0x03, MRMSrcMem, (outs RC:$dst),
4583 (ins RC:$src1, x86memop:$src2, i8imm:$src3),
4584 !strconcat("valign"##Suffix,
4585 " \t{$src3, $src2, $src1, $dst|"
4586 "$dst, $src1, $src2, $src3}"),
// VALIGN instantiations: mask class and GR class track the element count
// (16 x i32 → VK16WM/GR16, 8 x i64 → VK8WM/GR8).
4589 defm VALIGND : avx512_valign<"d", VR512, VK16WM, GR16, i512mem, v16i32, v16f32>,
4590 EVEX_V512, EVEX_CD8<32, CD8VF>;
4591 defm VALIGNQ : avx512_valign<"q", VR512, VK8WM, GR8, i512mem, v8i64, v8f64>,
4592 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
// Helper fragments to match sext vXi1 to vXiY: an arithmetic shift right by
// (element width - 1) replicates each element's sign bit across the element.
4594 def v16i1sextv16i32  : PatLeaf<(v16i32 (X86vsrai VR512:$src, (i8 31)))>;
4595 def v8i1sextv8i64   : PatLeaf<(v8i64 (X86vsrai VR512:$src, (i8 63)))>;
// VPABSD/VPABSQ: packed absolute value. Generates the full matrix of forms:
// {rr, rm, rmb} x {unmasked, merge-masked (k), zero-masked (kz)}, where the
// 'b' forms broadcast a scalar memory operand (BrdcstStr, e.g. "{1to16}").
// NOTE(review): pattern bodies and several closing-brace/tail lines appear
// to be missing from this chunk; also the memory forms hard-code VR512:$dst
// while the register forms use RC:$dst — harmless while only instantiated
// with RC = VR512, but worth confirming against the full file.
4598 multiclass avx512_vpabs<bits<8> opc, string OpcodeStr, ValueType OpVT,
4599 RegisterClass KRC, RegisterClass RC,
4600 X86MemOperand x86memop, X86MemOperand x86scalar_mop,
4602 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
4603 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
4605 def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
4606 !strconcat(OpcodeStr, " \t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
4608 def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
4609 !strconcat(OpcodeStr,
4610 " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
4612 let mayLoad = 1 in {
4613 def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4614 (ins x86memop:$src),
4615 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
4617 def rmk : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4618 (ins KRC:$mask, x86memop:$src),
4619 !strconcat(OpcodeStr,
4620 " \t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
4622 def rmkz : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4623 (ins KRC:$mask, x86memop:$src),
4624 !strconcat(OpcodeStr,
4625 " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
4627 def rmb : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4628 (ins x86scalar_mop:$src),
4629 !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
4630 ", $dst|$dst, ${src}", BrdcstStr, "}"),
4632 def rmbk : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4633 (ins KRC:$mask, x86scalar_mop:$src),
4634 !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
4635 ", $dst {${mask}}|$dst {${mask}}, ${src}", BrdcstStr, "}"),
4636 []>, EVEX, EVEX_B, EVEX_K;
4637 def rmbkz : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
4638 (ins KRC:$mask, x86scalar_mop:$src),
4639 !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
4640 ", $dst {${mask}} {z}|$dst {${mask}} {z}, ${src}",
4642 []>, EVEX, EVEX_B, EVEX_KZ;
// VPABS instantiations and selection patterns.
4646 defm VPABSDZ : avx512_vpabs<0x1E, "vpabsd", v16i32, VK16WM, VR512,
4647 i512mem, i32mem, "{1to16}">, EVEX_V512,
4648 EVEX_CD8<32, CD8VF>;
4649 defm VPABSQZ : avx512_vpabs<0x1F, "vpabsq", v8i64, VK8WM, VR512,
4650 i512mem, i64mem, "{1to8}">, EVEX_V512, VEX_W,
4651 EVEX_CD8<64, CD8VF>;
// Match the open-coded abs idiom built from the sign-mask PatLeaf helpers
// (v16i1sextv16i32 / v8i1sextv8i64) defined above.
// NOTE(review): the leading 'def : Pat<(...' line of each of these two
// patterns appears to be missing from this chunk — confirm against the
// full file.
4654 (bc_v16i32 (v16i1sextv16i32)),
4655 (bc_v16i32 (add (v16i32 VR512:$src), (v16i1sextv16i32)))),
4656 (VPABSDZrr VR512:$src)>;
4658 (bc_v8i64 (v8i1sextv8i64)),
4659 (bc_v8i64 (add (v8i64 VR512:$src), (v8i1sextv8i64)))),
4660 (VPABSQZrr VR512:$src)>;
// Unmasked intrinsic calls (zero passthru, all-ones mask) select the plain
// register form.
4662 def : Pat<(v16i32 (int_x86_avx512_mask_pabs_d_512 (v16i32 VR512:$src),
4663 (v16i32 immAllZerosV), (i16 -1))),
4664 (VPABSDZrr VR512:$src)>;
4665 def : Pat<(v8i64 (int_x86_avx512_mask_pabs_q_512 (v8i64 VR512:$src),
4666 (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
4667 (VPABSQZrr VR512:$src)>;
// Shared multiclass for the CDI unary ops (VPCONFLICT, VPLZCNT): generates
// register, memory and broadcast-memory forms, each in unmasked,
// zero-masked (kz) and merge-masked (k) flavors; the merge-masked forms tie
// $src1 to $dst so masked-off lanes preserve the destination.
// NOTE(review): several interior lines (the rr 'ins' list, pattern tails and
// closing braces) appear to be missing from this chunk; apart from the asm
// string fix noted below, all visible lines are preserved as-is.
4669 multiclass avx512_conflict<bits<8> opc, string OpcodeStr,
4670 RegisterClass RC, RegisterClass KRC,
4671 X86MemOperand x86memop,
4672 X86MemOperand x86scalar_mop, string BrdcstStr> {
4673 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
// Fixed: dropped the stray space before '|' in the AT&T half of the asm
// string ("${dst} |" -> "${dst}|"), matching every sibling form below.
4675 !strconcat(OpcodeStr, " \t{$src, ${dst}|${dst}, $src}"),
4677 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4678 (ins x86memop:$src),
4679 !strconcat(OpcodeStr, " \t{$src, ${dst}|${dst}, $src}"),
4681 def rmb : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4682 (ins x86scalar_mop:$src),
4683 !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
4684 ", ${dst}|${dst}, ${src}", BrdcstStr, "}"),
4686 def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
4687 (ins KRC:$mask, RC:$src),
4688 !strconcat(OpcodeStr,
4689 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
4691 def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4692 (ins KRC:$mask, x86memop:$src),
4693 !strconcat(OpcodeStr,
4694 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
4696 def rmbkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4697 (ins KRC:$mask, x86scalar_mop:$src),
4698 !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
4699 ", ${dst} {${mask}} {z}|${dst} {${mask}} {z}, ${src}",
4701 []>, EVEX, EVEX_KZ, EVEX_B;
// Merge-masked forms: $src1 supplies the preserved lanes via the tied-dst
// constraint.
4703 let Constraints = "$src1 = $dst" in {
4704 def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
4705 (ins RC:$src1, KRC:$mask, RC:$src2),
4706 !strconcat(OpcodeStr,
4707 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
4709 def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4710 (ins RC:$src1, KRC:$mask, x86memop:$src2),
4711 !strconcat(OpcodeStr,
4712 " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
4714 def rmbk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4715 (ins RC:$src1, KRC:$mask, x86scalar_mop:$src2),
4716 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
4717 ", ${dst} {${mask}}|${dst} {${mask}}, ${src2}", BrdcstStr, "}"),
4718 []>, EVEX, EVEX_K, EVEX_B;
// CDI instruction instantiations (conflict detection + leading-zero count)
// and their selection patterns.
// NOTE(review): closing '}' lines for the 'let Predicates' scopes and the
// leading 'def : Pat<(...' / mask-operand lines of the intrinsic patterns
// appear to be missing from this chunk — confirm against the full file.
4722 let Predicates = [HasCDI] in {
4723 defm VPCONFLICTD : avx512_conflict<0xC4, "vpconflictd", VR512, VK16WM,
4724 i512mem, i32mem, "{1to16}">,
4725 EVEX_V512, EVEX_CD8<32, CD8VF>;
4728 defm VPCONFLICTQ : avx512_conflict<0xC4, "vpconflictq", VR512, VK8WM,
4729 i512mem, i64mem, "{1to8}">,
4730 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// Masked intrinsic calls: copy the GR mask into the matching k-register
// class and select the merge-masked rrk form.
4734 def : Pat<(int_x86_avx512_mask_conflict_d_512 VR512:$src2, VR512:$src1,
4736 (VPCONFLICTDrrk VR512:$src1,
4737 (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), VR512:$src2)>;
4739 def : Pat<(int_x86_avx512_mask_conflict_q_512 VR512:$src2, VR512:$src1,
4741 (VPCONFLICTQrrk VR512:$src1,
4742 (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), VR512:$src2)>;
4744 let Predicates = [HasCDI] in {
4745 defm VPLZCNTD : avx512_conflict<0x44, "vplzcntd", VR512, VK16WM,
4746 i512mem, i32mem, "{1to16}">,
4747 EVEX_V512, EVEX_CD8<32, CD8VF>;
4750 defm VPLZCNTQ : avx512_conflict<0x44, "vplzcntq", VR512, VK8WM,
4751 i512mem, i64mem, "{1to8}">,
4752 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
4756 def : Pat<(int_x86_avx512_mask_lzcnt_d_512 VR512:$src2, VR512:$src1,
4758 (VPLZCNTDrrk VR512:$src1,
4759 (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), VR512:$src2)>;
4761 def : Pat<(int_x86_avx512_mask_lzcnt_q_512 VR512:$src2, VR512:$src1,
4763 (VPLZCNTQrrk VR512:$src1,
4764 (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), VR512:$src2)>;
// Generic ctlz nodes also select VPLZCNT directly.
4766 def : Pat<(v16i32 (ctlz (memopv16i32 addr:$src))),
4767 (VPLZCNTDrm addr:$src)>;
4768 def : Pat<(v16i32 (ctlz (v16i32 VR512:$src))),
4769 (VPLZCNTDrr VR512:$src)>;
4770 def : Pat<(v8i64 (ctlz (memopv8i64 addr:$src))),
4771 (VPLZCNTQrm addr:$src)>;
4772 def : Pat<(v8i64 (ctlz (v8i64 VR512:$src))),
4773 (VPLZCNTQrr VR512:$src)>;
// Storing an i1 constant: true may appear as -1 (sign-extended) or 1; both
// are stored as byte 1, false as byte 0.
4775 def : Pat<(store (i1 -1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
4776 def : Pat<(store (i1 1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
4777 def : Pat<(store (i1 0), addr:$dst), (MOV8mi addr:$dst, (i8 0))>;
// Storing a VK1 mask register goes through VK16 and KMOVW.
// NOTE(review): KMOVWmk writes 16 bits for a 1-bit value — confirm callers
// guarantee the destination slot is at least 2 bytes.
4779 def : Pat<(store VK1:$src, addr:$dst),
4780 (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK1:$src, VK16))>;
// Fragment matching a truncating store whose memory type is exactly i1.
// NOTE(review): the closing '}]>;' of this PatFrag appears to be missing
// from this chunk — confirm against the full file.
4782 def truncstorei1 : PatFrag<(ops node:$val, node:$ptr),
4783 (truncstore node:$val, node:$ptr), [{
4784 return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
4787 def : Pat<(truncstorei1 GR8:$src, addr:$dst),
4788 (MOV8mr addr:$dst, GR8:$src)>;