// Group template arguments that can be derived from the vector type (EltNum x
// EltVT). These are things like the register class for the writemask, etc.
// The idea is to pass one of these as the template argument rather than the
// individual arguments.
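// For example (a sketch of what the fields below derive):
// X86VectorVTInfo<16, i32, VR512, "d"> yields VT = v16i32, KRC = VK16,
// KRCWM = VK16WM, MRC = GR16, EltSize = 32, TypeVariantName = "i",
// Size = 512, MemOp = i512mem, and Suffix = "d".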
class X86VectorVTInfo<int numelts, ValueType EltVT, RegisterClass rc,
                      string suffix = ""> {
  RegisterClass RC = rc;
  int NumElts = numelts;
  // Corresponding mask register class.
  RegisterClass KRC = !cast<RegisterClass>("VK" # NumElts);

  // Corresponding write-mask register class.
  RegisterClass KRCWM = !cast<RegisterClass>("VK" # NumElts # "WM");
  // The GPR register class that can hold the write mask. Use GR8 for fewer
  // than 8 elements. Use shift-right and equal to work around the lack of
  // !lt in tablegen.
  RegisterClass MRC =
    !cast<RegisterClass>("GR" #
                         !if (!eq (!srl(NumElts, 3), 0), 8, NumElts));
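  // A sketch of how the workaround folds: !srl(NumElts, 3) is NumElts / 8,
  // which is 0 exactly when NumElts < 8, so e.g. NumElts = 2 gives GR8 and
  // NumElts = 16 gives GR16.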
  // Suffix used in the instruction mnemonic.
  string Suffix = suffix;

  string VTName = "v" # NumElts # EltVT;
  // The vector VT.
  ValueType VT = !cast<ValueType>(VTName);
  string EltTypeName = !cast<string>(EltVT);
  // Size of the element type in bits, e.g. 32 for v16i32.
  string EltSizeName = !subst("i", "", !subst("f", "", EltTypeName));
  int EltSize = EltVT.Size;

  // "i" for integer types and "f" for floating-point types
  string TypeVariantName = !subst(EltSizeName, "", EltTypeName);
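  // For example, for EltVT = i32: EltTypeName = "i32", EltSizeName = "32",
  // and TypeVariantName = !subst("32", "", "i32") = "i".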
  // Size of RC in bits, e.g. 512 for VR512.
  int Size = VT.Size;
  // The corresponding memory operand, e.g. i512mem for VR512.
  X86MemOperand MemOp = !cast<X86MemOperand>(TypeVariantName # Size # "mem");
  X86MemOperand ScalarMemOp = !cast<X86MemOperand>(EltVT # "mem");
  // Note: For 128/256-bit integer VT we choose loadv2i64/loadv4i64
  // due to load promotion during legalization.
  PatFrag LdFrag = !cast<PatFrag>("load" #
                                  !if (!eq (TypeVariantName, "i"),
                                       !if (!eq (Size, 128), "v2i64",
                                            !if (!eq (Size, 256), "v4i64",
                                                 VTName)), VTName));

  PatFrag ScalarLdFrag = !cast<PatFrag>("load" # EltVT);
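  // E.g. a 256-bit integer type such as v8i32 gets LdFrag = loadv4i64, while
  // 512-bit and floating-point types load as themselves (loadv16i32,
  // loadv16f32, ...).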
  // Load patterns used for memory operands. We only have this defined in
  // case of i64 element types for sub-512 integer vectors. For now, keep
  // MemOpFrag undefined in these cases.
  PatFrag MemOpFrag =
    !if (!eq (TypeVariantName, "f"), !cast<PatFrag>("memop" # VTName),
         !if (!eq (EltTypeName, "i64"), !cast<PatFrag>("memop" # VTName),
              !if (!eq (VTName, "v16i32"), !cast<PatFrag>("memop" # VTName), ?)));
  // The corresponding float type, e.g. v16f32 for v16i32.
  // Note: For EltSize < 32, FloatVT is illegal and TableGen
  // fails to compile, so we choose FloatVT = VT.
  ValueType FloatVT = !cast<ValueType>(
                        !if (!eq (!srl(EltSize,5),0),
                             VTName,
                             !if (!eq(TypeVariantName, "i"),
                                  "v" # NumElts # "f" # EltSize,
                                  VTName)));
  // The string to specify embedded broadcast in assembly.
  string BroadcastStr = "{1to" # NumElts # "}";
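  // E.g. "{1to16}" for v16i32 and "{1to8}" for v8f64.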
  // 8-bit compressed displacement tuple/subvector format. This is only
  // defined for NumElts <= 8.
  CD8VForm CD8TupleForm = !if (!eq (!srl(NumElts, 4), 0),
                               !cast<CD8VForm>("CD8VT" # NumElts), ?);
  SubRegIndex SubRegIdx = !if (!eq (Size, 128), sub_xmm,
                          !if (!eq (Size, 256), sub_ymm, ?));
  Domain ExeDomain = !if (!eq (EltTypeName, "f32"), SSEPackedSingle,
                     !if (!eq (EltTypeName, "f64"), SSEPackedDouble,
                     SSEPackedInt));
  // A vector type of the same width with element type i32. This is used to
  // create the canonical constant zero node ImmAllZerosV.
  ValueType i32VT = !cast<ValueType>("v" # !srl(Size, 5) # "i32");
  dag ImmAllZerosV = (VT (bitconvert (i32VT immAllZerosV)));
}
def v64i8_info  : X86VectorVTInfo<64,  i8, VR512, "b">;
def v32i16_info : X86VectorVTInfo<32, i16, VR512, "w">;
def v16i32_info : X86VectorVTInfo<16, i32, VR512, "d">;
def v8i64_info  : X86VectorVTInfo<8,  i64, VR512, "q">;
def v16f32_info : X86VectorVTInfo<16, f32, VR512, "ps">;
def v8f64_info  : X86VectorVTInfo<8,  f64, VR512, "pd">;
// "x" in v32i8x_info means RC = VR256X
def v32i8x_info  : X86VectorVTInfo<32,  i8, VR256X, "b">;
def v16i16x_info : X86VectorVTInfo<16, i16, VR256X, "w">;
def v8i32x_info  : X86VectorVTInfo<8,  i32, VR256X, "d">;
def v4i64x_info  : X86VectorVTInfo<4,  i64, VR256X, "q">;

def v16i8x_info  : X86VectorVTInfo<16,  i8, VR128X, "b">;
def v8i16x_info  : X86VectorVTInfo<8,  i16, VR128X, "w">;
def v4i32x_info  : X86VectorVTInfo<4,  i32, VR128X, "d">;
def v2i64x_info  : X86VectorVTInfo<2,  i64, VR128X, "q">;
class AVX512VLVectorVTInfo<X86VectorVTInfo i512, X86VectorVTInfo i256,
                           X86VectorVTInfo i128> {
  X86VectorVTInfo info512 = i512;
  X86VectorVTInfo info256 = i256;
  X86VectorVTInfo info128 = i128;
}
def avx512vl_i8_info  : AVX512VLVectorVTInfo<v64i8_info,  v32i8x_info,
                                             v16i8x_info>;
def avx512vl_i16_info : AVX512VLVectorVTInfo<v32i16_info, v16i16x_info,
                                             v8i16x_info>;
def avx512vl_i32_info : AVX512VLVectorVTInfo<v16i32_info, v8i32x_info,
                                             v4i32x_info>;
def avx512vl_i64_info : AVX512VLVectorVTInfo<v8i64_info,  v4i64x_info,
                                             v2i64x_info>;
// This multiclass generates the masking variants from the non-masking
// variant. It only provides the assembly pieces for the masking variants.
// It assumes custom ISel patterns for masking which can be provided as
// template arguments.
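// For a hypothetical instruction with OpcodeStr = "vaddps" and
// AttSrcAsm = "$src2, $src1", the three variants assemble roughly as:
//   vaddps $src2, $src1, $dst                  (NAME)
//   vaddps $src2, $src1, $dst {${mask}}        (NAME#k)
//   vaddps $src2, $src1, $dst {${mask}} {z}    (NAME#kz)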
multiclass AVX512_maskable_custom<bits<8> O, Format F,
                                  dag Outs,
                                  dag Ins, dag MaskingIns, dag ZeroMaskingIns,
                                  string OpcodeStr,
                                  string AttSrcAsm, string IntelSrcAsm,
                                  list<dag> Pattern,
                                  list<dag> MaskingPattern,
                                  list<dag> ZeroMaskingPattern,
                                  string MaskingConstraint = "",
                                  InstrItinClass itin = NoItinerary,
                                  bit IsCommutable = 0> {
  let isCommutable = IsCommutable in
    def NAME: AVX512<O, F, Outs, Ins,
                     OpcodeStr#"\t{"#AttSrcAsm#", $dst|"#
                                   "$dst, "#IntelSrcAsm#"}",
                     Pattern, itin>;
  // Prefer over VMOV*rrk Pat<>
  let AddedComplexity = 20 in
    def NAME#k: AVX512<O, F, Outs, MaskingIns,
                       OpcodeStr#"\t{"#AttSrcAsm#", $dst {${mask}}|"#
                                 "$dst {${mask}}, "#IntelSrcAsm#"}",
                       MaskingPattern, itin>,
                EVEX_K {
      // In case of the 3src subclass this is overridden with a let.
      string Constraints = MaskingConstraint;
    }
  let AddedComplexity = 30 in // Prefer over VMOV*rrkz Pat<>
    def NAME#kz: AVX512<O, F, Outs, ZeroMaskingIns,
                        OpcodeStr#"\t{"#AttSrcAsm#", $dst {${mask}} {z}|"#
                                  "$dst {${mask}} {z}, "#IntelSrcAsm#"}",
                        ZeroMaskingPattern, itin>,
                 EVEX_KZ;
}
// Common base class of AVX512_maskable and AVX512_maskable_3src.
multiclass AVX512_maskable_common<bits<8> O, Format F, X86VectorVTInfo _,
                                  dag Outs,
                                  dag Ins, dag MaskingIns, dag ZeroMaskingIns,
                                  string OpcodeStr,
                                  string AttSrcAsm, string IntelSrcAsm,
                                  dag RHS, dag MaskingRHS,
                                  string MaskingConstraint = "",
                                  InstrItinClass itin = NoItinerary,
                                  bit IsCommutable = 0> :
  AVX512_maskable_custom<O, F, Outs, Ins, MaskingIns, ZeroMaskingIns, OpcodeStr,
                         AttSrcAsm, IntelSrcAsm,
                         [(set _.RC:$dst, RHS)],
                         [(set _.RC:$dst, MaskingRHS)],
                         [(set _.RC:$dst,
                               (vselect _.KRCWM:$mask, RHS, _.ImmAllZerosV))],
                         MaskingConstraint, NoItinerary, IsCommutable>;
// This multiclass generates the unconditional/non-masking, the masking and
// the zero-masking variant of the instruction. In the masking case, the
// preserved vector elements come from a new dummy input operand tied to $dst.
multiclass AVX512_maskable<bits<8> O, Format F, X86VectorVTInfo _,
                           dag Outs, dag Ins, string OpcodeStr,
                           string AttSrcAsm, string IntelSrcAsm,
                           dag RHS, InstrItinClass itin = NoItinerary,
                           bit IsCommutable = 0> :
   AVX512_maskable_common<O, F, _, Outs, Ins,
                          !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
                          !con((ins _.KRCWM:$mask), Ins),
                          OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
                          (vselect _.KRCWM:$mask, RHS, _.RC:$src0),
                          "$src0 = $dst", itin, IsCommutable>;
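// In other words, the merge-masked variant selects, per element, between the
// result (RHS) and the tied pass-through operand $src0, while the zero-masked
// variant (from the common base) selects between RHS and ImmAllZerosV.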
// Similar to AVX512_maskable but in this case one of the source operands
// ($src1) is already tied to $dst so we just use that for the preserved
// vector elements. NOTE that the NonTiedIns (the ins dag) should exclude
// $src1.
multiclass AVX512_maskable_3src<bits<8> O, Format F, X86VectorVTInfo _,
                                dag Outs, dag NonTiedIns, string OpcodeStr,
                                string AttSrcAsm, string IntelSrcAsm,
                                dag RHS> :
   AVX512_maskable_common<O, F, _, Outs,
                          !con((ins _.RC:$src1), NonTiedIns),
                          !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
                          !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
                          OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
                          (vselect _.KRCWM:$mask, RHS, _.RC:$src1)>;
multiclass AVX512_maskable_in_asm<bits<8> O, Format F, X86VectorVTInfo _,
                                  dag Outs, dag Ins,
                                  string OpcodeStr,
                                  string AttSrcAsm, string IntelSrcAsm,
                                  list<dag> Pattern> :
   AVX512_maskable_custom<O, F, Outs, Ins,
                          !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
                          !con((ins _.KRCWM:$mask), Ins),
                          OpcodeStr, AttSrcAsm, IntelSrcAsm, Pattern, [], [],
                          "$src0 = $dst">;
// Bitcasts between 512-bit vector types. Return the original type since
// no instruction is needed for the conversion
let Predicates = [HasAVX512] in {
  def : Pat<(v8f64  (bitconvert (v8i64 VR512:$src))),  (v8f64  VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v16i32 VR512:$src))), (v8f64  VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v32i16 VR512:$src))), (v8f64  VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v64i8 VR512:$src))),  (v8f64  VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v16f32 VR512:$src))), (v8f64  VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))),  (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v32i16 VR512:$src))), (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v64i8 VR512:$src))),  (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))),  (v16f32 VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v16i32 VR512:$src))), (v8i64  VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v32i16 VR512:$src))), (v8i64  VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v64i8 VR512:$src))),  (v8i64  VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v8f64 VR512:$src))),  (v8i64  VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v16f32 VR512:$src))), (v8i64  VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))),  (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v32i16 VR512:$src))), (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v64i8 VR512:$src))),  (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))),  (v16i32 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v8i64 VR512:$src))),  (v32i16 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v16i32 VR512:$src))), (v32i16 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v64i8 VR512:$src))),  (v32i16 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v8f64 VR512:$src))),  (v32i16 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
  def : Pat<(v64i8  (bitconvert (v8i64 VR512:$src))),  (v64i8  VR512:$src)>;
  def : Pat<(v64i8  (bitconvert (v16i32 VR512:$src))), (v64i8  VR512:$src)>;
  def : Pat<(v64i8  (bitconvert (v32i16 VR512:$src))), (v64i8  VR512:$src)>;
  def : Pat<(v64i8  (bitconvert (v8f64 VR512:$src))),  (v64i8  VR512:$src)>;
  def : Pat<(v64i8  (bitconvert (v16f32 VR512:$src))), (v64i8  VR512:$src)>;

  def : Pat<(v2i64 (bitconvert (v4i32 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128X:$src))), (v2f64 VR128X:$src)>;

  // Bitcasts between 256-bit vector types. Return the original type since
  // no instruction is needed for the conversion
  def : Pat<(v4f64  (bitconvert (v8f32 VR256X:$src))),  (v4f64  VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v8i32 VR256X:$src))),  (v4f64  VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v4i64 VR256X:$src))),  (v4f64  VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v16i16 VR256X:$src))), (v4f64  VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v32i8 VR256X:$src))),  (v4f64  VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v8i32 VR256X:$src))),  (v8f32  VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v4i64 VR256X:$src))),  (v8f32  VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v4f64 VR256X:$src))),  (v8f32  VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v32i8 VR256X:$src))),  (v8f32  VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v16i16 VR256X:$src))), (v8f32  VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v8f32 VR256X:$src))),  (v4i64  VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v8i32 VR256X:$src))),  (v4i64  VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v4f64 VR256X:$src))),  (v4i64  VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v32i8 VR256X:$src))),  (v4i64  VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v16i16 VR256X:$src))), (v4i64  VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v4f64 VR256X:$src))),  (v32i8  VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v4i64 VR256X:$src))),  (v32i8  VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v8f32 VR256X:$src))),  (v32i8  VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v8i32 VR256X:$src))),  (v32i8  VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v16i16 VR256X:$src))), (v32i8  VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v32i8 VR256X:$src))),  (v8i32  VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v16i16 VR256X:$src))), (v8i32  VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v8f32 VR256X:$src))),  (v8i32  VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v4i64 VR256X:$src))),  (v8i32  VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v4f64 VR256X:$src))),  (v8i32  VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v8f32 VR256X:$src))),  (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v8i32 VR256X:$src))),  (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v4i64 VR256X:$src))),  (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v4f64 VR256X:$src))),  (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v32i8 VR256X:$src))),  (v16i16 VR256X:$src)>;
}

// AVX-512: The VPXOR instruction writes zero to its upper part, so it's safe
// to use it to build zeros.
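// A zeroed 512-bit register is thus typically materialized with a zeroing
// idiom such as "vpxord %zmm0, %zmm0, %zmm0" when the pseudo below is
// expanded (a sketch of the intent, not necessarily the exact expansion).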
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, Predicates = [HasAVX512] in {
def AVX512_512_SET0 : I<0, Pseudo, (outs VR512:$dst), (ins), "",
                        [(set VR512:$dst, (v16f32 immAllZerosV))]>;
}
let Predicates = [HasAVX512] in {
  def : Pat<(v8i64 immAllZerosV), (AVX512_512_SET0)>;
  def : Pat<(v16i32 immAllZerosV), (AVX512_512_SET0)>;
  def : Pat<(v8f64 immAllZerosV), (AVX512_512_SET0)>;
}
//===----------------------------------------------------------------------===//
// AVX-512 - VECTOR INSERT
//
multiclass vinsert_for_size_no_alt<int Opcode,
                                   X86VectorVTInfo From, X86VectorVTInfo To,
                                   PatFrag vinsert_insert,
                                   SDNodeXForm INSERT_get_vinsert_imm> {
  let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
    def rr : AVX512AIi8<Opcode, MRMSrcReg, (outs VR512:$dst),
               (ins VR512:$src1, From.RC:$src2, i8imm:$src3),
               "vinsert" # From.EltTypeName # "x" # From.NumElts #
                                                "\t{$src3, $src2, $src1, $dst|"
                                                   "$dst, $src1, $src2, $src3}",
               [(set To.RC:$dst, (vinsert_insert:$src3 (To.VT VR512:$src1),
                                                       (From.VT From.RC:$src2),
                                                       (iPTR imm)))]>,
             EVEX_4V, EVEX_V512, EVEX_CD8<From.EltSize, From.CD8TupleForm>;

    let mayLoad = 1 in
    def rm : AVX512AIi8<Opcode, MRMSrcMem, (outs VR512:$dst),
               (ins VR512:$src1, From.MemOp:$src2, i8imm:$src3),
               "vinsert" # From.EltTypeName # "x" # From.NumElts #
                                                "\t{$src3, $src2, $src1, $dst|"
                                                   "$dst, $src1, $src2, $src3}",
               []>,
             EVEX_4V, EVEX_V512, EVEX_CD8<From.EltSize, From.CD8TupleForm>;
  }
}
multiclass vinsert_for_size<int Opcode,
                            X86VectorVTInfo From, X86VectorVTInfo To,
                            X86VectorVTInfo AltFrom, X86VectorVTInfo AltTo,
                            PatFrag vinsert_insert,
                            SDNodeXForm INSERT_get_vinsert_imm> :
  vinsert_for_size_no_alt<Opcode, From, To,
                          vinsert_insert, INSERT_get_vinsert_imm> {
  // Codegen pattern with the alternative types, e.g. v2i64 -> v8i64 for
  // vinserti32x4. Only add this if 64x2 and friends are not supported
  // natively via AVX512DQ.
  let Predicates = [NoDQI] in
    def : Pat<(vinsert_insert:$ins
                 (AltTo.VT VR512:$src1), (AltFrom.VT From.RC:$src2), (iPTR imm)),
              (AltTo.VT (!cast<Instruction>(NAME # From.EltSize # "x4rr")
                           VR512:$src1, From.RC:$src2,
                           (INSERT_get_vinsert_imm VR512:$ins)))>;
}
multiclass vinsert_for_type<ValueType EltVT32, int Opcode128,
                            ValueType EltVT64, int Opcode256> {
  defm NAME # "32x4" : vinsert_for_size<Opcode128,
                                        X86VectorVTInfo< 4, EltVT32, VR128X>,
                                        X86VectorVTInfo<16, EltVT32, VR512>,
                                        X86VectorVTInfo< 2, EltVT64, VR128X>,
                                        X86VectorVTInfo< 8, EltVT64, VR512>,
                                        vinsert128_insert,
                                        INSERT_get_vinsert128_imm>;
  let Predicates = [HasDQI] in
    defm NAME # "64x2" : vinsert_for_size_no_alt<Opcode128,
                                        X86VectorVTInfo< 2, EltVT64, VR128X>,
                                        X86VectorVTInfo< 8, EltVT64, VR512>,
                                        vinsert128_insert,
                                        INSERT_get_vinsert128_imm>, VEX_W;
  defm NAME # "64x4" : vinsert_for_size<Opcode256,
                                        X86VectorVTInfo< 4, EltVT64, VR256X>,
                                        X86VectorVTInfo< 8, EltVT64, VR512>,
                                        X86VectorVTInfo< 8, EltVT32, VR256>,
                                        X86VectorVTInfo<16, EltVT32, VR512>,
                                        vinsert256_insert,
                                        INSERT_get_vinsert256_imm>, VEX_W;
  let Predicates = [HasDQI] in
    defm NAME # "32x8" : vinsert_for_size_no_alt<Opcode256,
                                        X86VectorVTInfo< 8, EltVT32, VR256X>,
                                        X86VectorVTInfo<16, EltVT32, VR512>,
                                        vinsert256_insert,
                                        INSERT_get_vinsert256_imm>;
}
defm VINSERTF : vinsert_for_type<f32, 0x18, f64, 0x1a>;
defm VINSERTI : vinsert_for_type<i32, 0x38, i64, 0x3a>;
// vinsertps - insert f32 to XMM
def VINSERTPSzrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
      (ins VR128X:$src1, VR128X:$src2, i8imm:$src3),
      "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      [(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
      EVEX_4V;
def VINSERTPSzrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
      (ins VR128X:$src1, f32mem:$src2, i8imm:$src3),
      "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      [(set VR128X:$dst, (X86insertps VR128X:$src1,
                          (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
                          imm:$src3))]>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
//===----------------------------------------------------------------------===//
// AVX-512 VECTOR EXTRACT
//---
multiclass vextract_for_size<int Opcode,
                             X86VectorVTInfo From, X86VectorVTInfo To,
                             X86VectorVTInfo AltFrom, X86VectorVTInfo AltTo,
                             PatFrag vextract_extract,
                             SDNodeXForm EXTRACT_get_vextract_imm> {
  let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
    defm rr : AVX512_maskable_in_asm<Opcode, MRMDestReg, To, (outs To.RC:$dst),
                (ins VR512:$src1, i8imm:$idx),
                "vextract" # To.EltTypeName # "x4",
                "$idx, $src1", "$src1, $idx",
                [(set To.RC:$dst, (vextract_extract:$idx (From.VT VR512:$src1),
                                                         (iPTR imm)))]>,
              AVX512AIi8Base, EVEX, EVEX_V512;

    let mayStore = 1 in
    def rm : AVX512AIi8<Opcode, MRMDestMem, (outs),
                (ins To.MemOp:$dst, VR512:$src1, i8imm:$src2),
                "vextract" # To.EltTypeName # "x4\t{$src2, $src1, $dst|"
                                               "$dst, $src1, $src2}",
                []>, EVEX, EVEX_V512, EVEX_CD8<To.EltSize, CD8VT4>;
  }
  // Codegen pattern with the alternative types, e.g. v8i64 -> v2i64 for
  // vextracti32x4.
  def : Pat<(vextract_extract:$ext (AltFrom.VT VR512:$src1), (iPTR imm)),
            (AltTo.VT (!cast<Instruction>(NAME # To.EltSize # "x4rr")
                          VR512:$src1,
                          (EXTRACT_get_vextract_imm To.RC:$ext)))>;

  // A 128/256-bit subvector extract from the first 512-bit vector position is
  // a subregister copy that needs no instruction.
  def : Pat<(To.VT (extract_subvector (From.VT VR512:$src), (iPTR 0))),
            (To.VT
              (EXTRACT_SUBREG (From.VT VR512:$src), To.SubRegIdx))>;

  // And for the alternative types.
  def : Pat<(AltTo.VT (extract_subvector (AltFrom.VT VR512:$src), (iPTR 0))),
            (AltTo.VT
              (EXTRACT_SUBREG (AltFrom.VT VR512:$src), AltTo.SubRegIdx))>;

  // Intrinsic call with masking.
  def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
                              "x4_512")
              VR512:$src1, (iPTR imm:$idx), To.RC:$src0, GR8:$mask),
            (!cast<Instruction>(NAME # To.EltSize # "x4rrk") To.RC:$src0,
                (v4i1 (COPY_TO_REGCLASS GR8:$mask, VK4WM)),
                VR512:$src1, imm:$idx)>;

  // Intrinsic call with zero-masking.
  def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
                              "x4_512")
              VR512:$src1, (iPTR imm:$idx), To.ImmAllZerosV, GR8:$mask),
            (!cast<Instruction>(NAME # To.EltSize # "x4rrkz")
                (v4i1 (COPY_TO_REGCLASS GR8:$mask, VK4WM)),
                VR512:$src1, imm:$idx)>;

  // Intrinsic call without masking.
  def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
                              "x4_512")
              VR512:$src1, (iPTR imm:$idx), To.ImmAllZerosV, (i8 -1)),
            (!cast<Instruction>(NAME # To.EltSize # "x4rr")
                VR512:$src1, imm:$idx)>;
}
multiclass vextract_for_type<ValueType EltVT32, int Opcode32,
                             ValueType EltVT64, int Opcode64> {
  defm NAME # "32x4" : vextract_for_size<Opcode32,
                                         X86VectorVTInfo<16, EltVT32, VR512>,
                                         X86VectorVTInfo< 4, EltVT32, VR128X>,
                                         X86VectorVTInfo< 8, EltVT64, VR512>,
                                         X86VectorVTInfo< 2, EltVT64, VR128X>,
                                         vextract128_extract,
                                         EXTRACT_get_vextract128_imm>;
  defm NAME # "64x4" : vextract_for_size<Opcode64,
                                         X86VectorVTInfo< 8, EltVT64, VR512>,
                                         X86VectorVTInfo< 4, EltVT64, VR256X>,
                                         X86VectorVTInfo<16, EltVT32, VR512>,
                                         X86VectorVTInfo< 8, EltVT32, VR256>,
                                         vextract256_extract,
                                         EXTRACT_get_vextract256_imm>, VEX_W;
}
defm VEXTRACTF : vextract_for_type<f32, 0x19, f64, 0x1b>;
defm VEXTRACTI : vextract_for_type<i32, 0x39, i64, 0x3b>;
// A 128-bit subvector insert to the first 512-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
            (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
            sub_ymm)>;
def : Pat<(insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
            (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
            sub_ymm)>;
def : Pat<(insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
            (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
            sub_ymm)>;
def : Pat<(insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
            (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
            sub_ymm)>;
def : Pat<(insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
// vextractps - extract 32 bits from XMM
def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
      (ins VR128X:$src1, i32i8imm:$src2),
      "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
      EVEX;

def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
      (ins f32mem:$dst, VR128X:$src1, i32i8imm:$src2),
      "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
              addr:$dst)]>, EVEX, EVEX_CD8<32, CD8VT1>;
//===---------------------------------------------------------------------===//
// AVX-512 BROADCAST
//---
multiclass avx512_fp_broadcast<bits<8> opc, string OpcodeStr,
                               RegisterClass DestRC,
                               RegisterClass SrcRC, X86MemOperand x86memop> {
  def rr : AVX5128I<opc, MRMSrcReg, (outs DestRC:$dst), (ins SrcRC:$src),
                    !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                    []>, EVEX;
  def rm : AVX5128I<opc, MRMSrcMem, (outs DestRC:$dst), (ins x86memop:$src),
                    !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),[]>, EVEX;
}
let ExeDomain = SSEPackedSingle in {
  defm VBROADCASTSSZ : avx512_fp_broadcast<0x18, "vbroadcastss", VR512,
                                           VR128X, f32mem>,
                       EVEX_V512, EVEX_CD8<32, CD8VT1>;
}

let ExeDomain = SSEPackedDouble in {
  defm VBROADCASTSDZ : avx512_fp_broadcast<0x19, "vbroadcastsd", VR512,
                                           VR128X, f64mem>,
                       EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
}
def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))),
          (VBROADCASTSSZrm addr:$src)>;
def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))),
          (VBROADCASTSDZrm addr:$src)>;

def : Pat<(int_x86_avx512_vbroadcast_ss_512 addr:$src),
          (VBROADCASTSSZrm addr:$src)>;
def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
          (VBROADCASTSDZrm addr:$src)>;
multiclass avx512_int_broadcast_reg<bits<8> opc, string OpcodeStr,
                                    RegisterClass SrcRC, RegisterClass KRC> {
  def Zrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins SrcRC:$src),
                     !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                     []>, EVEX, EVEX_V512;
  def Zkrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst),
                      (ins KRC:$mask, SrcRC:$src),
                      !strconcat(OpcodeStr,
                        " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
                      []>, EVEX, EVEX_V512, EVEX_KZ;
}
defm VPBROADCASTDr : avx512_int_broadcast_reg<0x7C, "vpbroadcastd", GR32, VK16WM>;
defm VPBROADCASTQr : avx512_int_broadcast_reg<0x7C, "vpbroadcastq", GR64, VK8WM>,
                     VEX_W;
def : Pat <(v16i32 (X86vzext VK16WM:$mask)),
           (VPBROADCASTDrZkrr VK16WM:$mask, (i32 (MOV32ri 0x1)))>;

def : Pat <(v8i64 (X86vzext VK8WM:$mask)),
           (VPBROADCASTQrZkrr VK8WM:$mask, (i64 (MOV64ri 0x1)))>;
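// Rationale for the two patterns above (a sketch): a zero-masked broadcast of
// the constant 1 writes 1 to every element selected by the mask and 0
// elsewhere, which is exactly the zero-extension of the i1 mask vector.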
def : Pat<(v16i32 (X86VBroadcast (i32 GR32:$src))),
          (VPBROADCASTDrZrr GR32:$src)>;
def : Pat<(v16i32 (X86VBroadcastm VK16WM:$mask, (i32 GR32:$src))),
          (VPBROADCASTDrZkrr VK16WM:$mask, GR32:$src)>;
def : Pat<(v8i64 (X86VBroadcast (i64 GR64:$src))),
          (VPBROADCASTQrZrr GR64:$src)>;
def : Pat<(v8i64 (X86VBroadcastm VK8WM:$mask, (i64 GR64:$src))),
          (VPBROADCASTQrZkrr VK8WM:$mask, GR64:$src)>;
def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_i32_512 (i32 GR32:$src))),
          (VPBROADCASTDrZrr GR32:$src)>;
def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_i64_512 (i64 GR64:$src))),
          (VPBROADCASTQrZrr GR64:$src)>;
def : Pat<(v16i32 (int_x86_avx512_mask_pbroadcast_d_gpr_512 (i32 GR32:$src),
                   (v16i32 immAllZerosV), (i16 GR16:$mask))),
          (VPBROADCASTDrZkrr (COPY_TO_REGCLASS GR16:$mask, VK16WM), GR32:$src)>;
def : Pat<(v8i64 (int_x86_avx512_mask_pbroadcast_q_gpr_512 (i64 GR64:$src),
                   (bc_v8i64 (v16i32 immAllZerosV)), (i8 GR8:$mask))),
          (VPBROADCASTQrZkrr (COPY_TO_REGCLASS GR8:$mask, VK8WM), GR64:$src)>;
multiclass avx512_int_broadcast_rm<bits<8> opc, string OpcodeStr,
                          X86MemOperand x86memop, PatFrag ld_frag,
                          RegisterClass DstRC, ValueType OpVT, ValueType SrcVT,
                          RegisterClass KRC> {
  def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins VR128X:$src),
                    !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                    [(set DstRC:$dst,
                      (OpVT (X86VBroadcast (SrcVT VR128X:$src))))]>, EVEX;
  def krr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins KRC:$mask,
                                                             VR128X:$src),
                     !strconcat(OpcodeStr,
                       " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
                     [(set DstRC:$dst,
                       (OpVT (X86VBroadcastm KRC:$mask, (SrcVT VR128X:$src))))]>,
                     EVEX, EVEX_KZ;
  let mayLoad = 1 in {
    def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
                      !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                      [(set DstRC:$dst,
                        (OpVT (X86VBroadcast (ld_frag addr:$src))))]>, EVEX;
    def krm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins KRC:$mask,
                                                               x86memop:$src),
                       !strconcat(OpcodeStr,
                         " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
                       [(set DstRC:$dst, (OpVT (X86VBroadcastm KRC:$mask,
                                                (ld_frag addr:$src))))]>, EVEX, EVEX_KZ;
  }
}
defm VPBROADCASTDZ : avx512_int_broadcast_rm<0x58, "vpbroadcastd", i32mem,
                                             loadi32, VR512, v16i32, v4i32, VK16WM>,
                     EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VPBROADCASTQZ : avx512_int_broadcast_rm<0x59, "vpbroadcastq", i64mem,
                                             loadi64, VR512, v8i64, v2i64, VK8WM>,
                     EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
multiclass avx512_int_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
                                          X86MemOperand x86memop, PatFrag ld_frag,
                                          RegisterClass KRC> {
  let mayLoad = 1 in {
    def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins x86memop:$src),
                      !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                      []>, EVEX;
    def krm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins KRC:$mask,
                                                               x86memop:$src),
                       !strconcat(OpcodeStr,
                         " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
                       []>, EVEX, EVEX_KZ;
  }
}
defm VBROADCASTI32X4 : avx512_int_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
                                                      i128mem, loadv2i64, VK16WM>,
                       EVEX_V512, EVEX_CD8<32, CD8VT4>;
defm VBROADCASTI64X4 : avx512_int_subvec_broadcast_rm<0x5b, "vbroadcasti64x4",
                                                      i256mem, loadv4i64, VK16WM>,
                       VEX_W, EVEX_V512, EVEX_CD8<64, CD8VT4>;
def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_512 (v4i32 VR128X:$src))),
          (VPBROADCASTDZrr VR128X:$src)>;
def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_512 (v2i64 VR128X:$src))),
          (VPBROADCASTQZrr VR128X:$src)>;

def : Pat<(v16f32 (X86VBroadcast (v4f32 VR128X:$src))),
          (VBROADCASTSSZrr VR128X:$src)>;
def : Pat<(v8f64 (X86VBroadcast (v2f64 VR128X:$src))),
          (VBROADCASTSDZrr VR128X:$src)>;

def : Pat<(v16f32 (int_x86_avx512_vbroadcast_ss_ps_512 (v4f32 VR128X:$src))),
          (VBROADCASTSSZrr VR128X:$src)>;
def : Pat<(v8f64 (int_x86_avx512_vbroadcast_sd_pd_512 (v2f64 VR128X:$src))),
          (VBROADCASTSDZrr VR128X:$src)>;
// Provide fallback in case the load node that is used in the patterns above
// is used by additional users, which prevents the pattern selection.
def : Pat<(v16f32 (X86VBroadcast FR32X:$src)),
          (VBROADCASTSSZrr (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
def : Pat<(v8f64 (X86VBroadcast FR64X:$src)),
          (VBROADCASTSDZrr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
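// A sketch of the trick: COPY_TO_REGCLASS reinterprets the FR32X/FR64X scalar
// as the low element of a VR128X register, which the register-to-register
// broadcast forms above can then splat.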
let Predicates = [HasAVX512] in {
def : Pat<(v8i32 (X86VBroadcastm (v8i1 VK8WM:$mask), (loadi32 addr:$src))),
          (EXTRACT_SUBREG
            (v16i32 (VPBROADCASTDZkrm (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
                                      addr:$src)), sub_ymm)>;
}
//===----------------------------------------------------------------------===//
// AVX-512 BROADCAST MASK TO VECTOR REGISTER
//---
multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
                                 RegisterClass KRC> {
  let Predicates = [HasCDI] in
  def Zrr : AVX512XS8I<opc, MRMSrcReg, (outs VR512:$dst), (ins KRC:$src),
                       !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                       []>, EVEX, EVEX_V512;

  let Predicates = [HasCDI, HasVLX] in {
    def Z128rr : AVX512XS8I<opc, MRMSrcReg, (outs VR128:$dst), (ins KRC:$src),
                            !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                            []>, EVEX, EVEX_V128;
    def Z256rr : AVX512XS8I<opc, MRMSrcReg, (outs VR256:$dst), (ins KRC:$src),
                            !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                            []>, EVEX, EVEX_V256;
  }
}
let Predicates = [HasCDI] in {
  defm VPBROADCASTMW2D : avx512_mask_broadcast<0x3A, "vpbroadcastmw2d",
                                               VK16>;
  defm VPBROADCASTMB2Q : avx512_mask_broadcast<0x2A, "vpbroadcastmb2q",
                                               VK8>, VEX_W;
}
//===----------------------------------------------------------------------===//
// AVX-512 - VPERM
//
// -- immediate form --
multiclass avx512_perm_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
                           SDNode OpNode, PatFrag mem_frag,
                           X86MemOperand x86memop, ValueType OpVT> {
  def ri : AVX512AIi8<opc, MRMSrcReg, (outs RC:$dst),
                      (ins RC:$src1, i8imm:$src2),
                      !strconcat(OpcodeStr,
                          " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                      [(set RC:$dst,
                        (OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
                      EVEX;
  def mi : AVX512AIi8<opc, MRMSrcMem, (outs RC:$dst),
                      (ins x86memop:$src1, i8imm:$src2),
                      !strconcat(OpcodeStr,
                          " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                      [(set RC:$dst,
                        (OpVT (OpNode (mem_frag addr:$src1),
                               (i8 imm:$src2))))]>, EVEX;
}
defm VPERMQZ  : avx512_perm_imm<0x00, "vpermq", VR512, X86VPermi, memopv8i64,
                                i512mem, v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
let ExeDomain = SSEPackedDouble in
defm VPERMPDZ : avx512_perm_imm<0x01, "vpermpd", VR512, X86VPermi, memopv8f64,
                                f512mem, v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// -- VPERM - register form --
multiclass avx512_perm<bits<8> opc, string OpcodeStr, RegisterClass RC,
                       PatFrag mem_frag, X86MemOperand x86memop, ValueType OpVT> {

  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
                    (ins RC:$src1, RC:$src2),
                    !strconcat(OpcodeStr,
                        " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                      (OpVT (X86VPermv RC:$src1, RC:$src2)))]>, EVEX_4V;

  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                    (ins RC:$src1, x86memop:$src2),
                    !strconcat(OpcodeStr,
                        " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                      (OpVT (X86VPermv RC:$src1, (mem_frag addr:$src2))))]>,
                    EVEX_4V;
}
defm VPERMDZ  : avx512_perm<0x36, "vpermd",  VR512, memopv16i32, i512mem,
                            v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMQZ  : avx512_perm<0x36, "vpermq",  VR512, memopv8i64,  i512mem,
                            v8i64>,  EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
let ExeDomain = SSEPackedSingle in
defm VPERMPSZ : avx512_perm<0x16, "vpermps", VR512, memopv16f32, f512mem,
                            v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
let ExeDomain = SSEPackedDouble in
defm VPERMPDZ : avx512_perm<0x16, "vpermpd", VR512, memopv8f64,  f512mem,
                            v8f64>,  EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// -- VPERM2I - 3 source operands form --
multiclass avx512_perm_3src<bits<8> opc, string OpcodeStr, RegisterClass RC,
                            PatFrag mem_frag, X86MemOperand x86memop,
                            SDNode OpNode, ValueType OpVT, RegisterClass KRC> {
let Constraints = "$src1 = $dst" in {
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
                    (ins RC:$src1, RC:$src2, RC:$src3),
                    !strconcat(OpcodeStr,
                        " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                    [(set RC:$dst,
                      (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>,
                    EVEX_4V;

  def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
                     (ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
                     !strconcat(OpcodeStr,
                         " \t{$src3, $src2, $dst {${mask}}|"
                         "$dst {${mask}}, $src2, $src3}"),
                     [(set RC:$dst, (OpVT (vselect KRC:$mask,
                                             (OpNode RC:$src1, RC:$src2,
                                                     RC:$src3),
                                             RC:$src1)))]>,
                     EVEX_4V, EVEX_K;

  let AddedComplexity = 30 in // Prefer over VMOV*rrkz Pat<>
    def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
                     (ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
                     !strconcat(OpcodeStr,
                         " \t{$src3, $src2, $dst {${mask}} {z} |",
                         "$dst {${mask}} {z}, $src2, $src3}"),
                     [(set RC:$dst, (OpVT (vselect KRC:$mask,
                                             (OpNode RC:$src1, RC:$src2,
                                                     RC:$src3),
                                             (OpVT (bitconvert
                                               (v16i32 immAllZerosV))))))]>,
                     EVEX_4V, EVEX_KZ;

  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                    (ins RC:$src1, RC:$src2, x86memop:$src3),
                    !strconcat(OpcodeStr,
                        " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                    [(set RC:$dst,
                      (OpVT (OpNode RC:$src1, RC:$src2,
                             (mem_frag addr:$src3))))]>, EVEX_4V;

  def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                     (ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
                     !strconcat(OpcodeStr,
                         " \t{$src3, $src2, $dst {${mask}}|"
                         "$dst {${mask}}, $src2, $src3}"),
                     [(set RC:$dst,
                       (OpVT (vselect KRC:$mask,
                                      (OpNode RC:$src1, RC:$src2,
                                              (mem_frag addr:$src3)),
                                      RC:$src1)))]>,
                     EVEX_4V, EVEX_K;

  let AddedComplexity = 10 in // Prefer over the rrkz variant
    def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                     (ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
                     !strconcat(OpcodeStr,
                         " \t{$src3, $src2, $dst {${mask}} {z}|"
                         "$dst {${mask}} {z}, $src2, $src3}"),
                     [(set RC:$dst,
                       (OpVT (vselect KRC:$mask,
                                      (OpNode RC:$src1, RC:$src2,
                                              (mem_frag addr:$src3)),
                                      (OpVT (bitconvert
                                        (v16i32 immAllZerosV))))))]>,
                     EVEX_4V, EVEX_KZ;
  }
}
defm VPERMI2D  : avx512_perm_3src<0x76, "vpermi2d",  VR512, memopv16i32,
                                  i512mem, X86VPermiv3, v16i32, VK16WM>,
                 EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMI2Q  : avx512_perm_3src<0x76, "vpermi2q",  VR512, memopv8i64,
                                  i512mem, X86VPermiv3, v8i64, VK8WM>,
                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
defm VPERMI2PS : avx512_perm_3src<0x77, "vpermi2ps", VR512, memopv16f32,
                                  i512mem, X86VPermiv3, v16f32, VK16WM>,
                 EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMI2PD : avx512_perm_3src<0x77, "vpermi2pd", VR512, memopv8f64,
                                  i512mem, X86VPermiv3, v8f64, VK8WM>,
                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
multiclass avx512_perm_table_3src<bits<8> opc, string Suffix, RegisterClass RC,
                          PatFrag mem_frag, X86MemOperand x86memop,
                          SDNode OpNode, ValueType OpVT, RegisterClass KRC,
                          ValueType MaskVT, RegisterClass MRC> :
        avx512_perm_3src<opc, "vpermt2"##Suffix, RC, mem_frag, x86memop, OpNode,
                         OpVT, KRC> {
  def : Pat<(OpVT (!cast<Intrinsic>("int_x86_avx512_mask_vpermt_"##Suffix##"_512")
                   VR512:$idx, VR512:$src1, VR512:$src2, -1)),
            (!cast<Instruction>(NAME#rr) VR512:$src1, VR512:$idx, VR512:$src2)>;

  def : Pat<(OpVT (!cast<Intrinsic>("int_x86_avx512_mask_vpermt_"##Suffix##"_512")
                   VR512:$idx, VR512:$src1, VR512:$src2, MRC:$mask)),
            (!cast<Instruction>(NAME#rrk) VR512:$src1,
              (MaskVT (COPY_TO_REGCLASS MRC:$mask, KRC)), VR512:$idx, VR512:$src2)>;
}
defm VPERMT2D  : avx512_perm_table_3src<0x7E, "d",  VR512, memopv16i32, i512mem,
                                        X86VPermv3, v16i32, VK16WM, v16i1, GR16>,
                 EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMT2Q  : avx512_perm_table_3src<0x7E, "q",  VR512, memopv8i64, i512mem,
                                        X86VPermv3, v8i64, VK8WM, v8i1, GR8>,
                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
defm VPERMT2PS : avx512_perm_table_3src<0x7F, "ps", VR512, memopv16f32, i512mem,
                                        X86VPermv3, v16f32, VK16WM, v16i1, GR16>,
                 EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMT2PD : avx512_perm_table_3src<0x7F, "pd", VR512, memopv8f64, i512mem,
                                        X86VPermv3, v8f64, VK8WM, v8i1, GR8>,
                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
//===----------------------------------------------------------------------===//
// AVX-512 - BLEND using mask
//
multiclass avx512_blendmask<bits<8> opc, string OpcodeStr,
                            RegisterClass KRC, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag mem_frag,
                            SDNode OpNode, ValueType vt> {
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
             (ins KRC:$mask, RC:$src1, RC:$src2),
             !strconcat(OpcodeStr,
             " \t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
             [(set RC:$dst, (OpNode KRC:$mask, (vt RC:$src2),
                             (vt RC:$src1)))]>, EVEX_4V, EVEX_K;
  let mayLoad = 1 in
  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
             (ins KRC:$mask, RC:$src1, x86memop:$src2),
             !strconcat(OpcodeStr,
             " \t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
             []>, EVEX_4V, EVEX_K;
}
let ExeDomain = SSEPackedSingle in
defm VBLENDMPSZ : avx512_blendmask<0x65, "vblendmps",
                                   VK16WM, VR512, f512mem,
                                   memopv16f32, vselect, v16f32>,
                  EVEX_CD8<32, CD8VF>, EVEX_V512;
let ExeDomain = SSEPackedDouble in
defm VBLENDMPDZ : avx512_blendmask<0x65, "vblendmpd",
                                   VK8WM, VR512, f512mem,
                                   memopv8f64, vselect, v8f64>,
                  VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
def : Pat<(v16f32 (int_x86_avx512_mask_blend_ps_512 (v16f32 VR512:$src1),
                   (v16f32 VR512:$src2), (i16 GR16:$mask))),
          (VBLENDMPSZrr (COPY_TO_REGCLASS GR16:$mask, VK16WM),
                        VR512:$src1, VR512:$src2)>;

def : Pat<(v8f64 (int_x86_avx512_mask_blend_pd_512 (v8f64 VR512:$src1),
                   (v8f64 VR512:$src2), (i8 GR8:$mask))),
          (VBLENDMPDZrr (COPY_TO_REGCLASS GR8:$mask, VK8WM),
                        VR512:$src1, VR512:$src2)>;
defm VPBLENDMDZ : avx512_blendmask<0x64, "vpblendmd",
                                   VK16WM, VR512, f512mem,
                                   memopv16i32, vselect, v16i32>,
                  EVEX_CD8<32, CD8VF>, EVEX_V512;

defm VPBLENDMQZ : avx512_blendmask<0x64, "vpblendmq",
                                   VK8WM, VR512, f512mem,
                                   memopv8i64, vselect, v8i64>,
                  VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
def : Pat<(v16i32 (int_x86_avx512_mask_blend_d_512 (v16i32 VR512:$src1),
                   (v16i32 VR512:$src2), (i16 GR16:$mask))),
          (VPBLENDMDZrr (COPY_TO_REGCLASS GR16:$mask, VK16),
                        VR512:$src1, VR512:$src2)>;

def : Pat<(v8i64 (int_x86_avx512_mask_blend_q_512 (v8i64 VR512:$src1),
                   (v8i64 VR512:$src2), (i8 GR8:$mask))),
          (VPBLENDMQZrr (COPY_TO_REGCLASS GR8:$mask, VK8),
                        VR512:$src1, VR512:$src2)>;
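// Without AVX512VL there are no 256-bit masked blends, so the patterns below
// widen the operands to 512 bits with SUBREG_TO_REG, blend in zmm, and take
// the low ymm back with EXTRACT_SUBREG.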
let Predicates = [HasAVX512] in {
def : Pat<(v8f32 (vselect (v8i1 VK8WM:$mask), (v8f32 VR256X:$src1),
                          (v8f32 VR256X:$src2))),
          (EXTRACT_SUBREG
            (v16f32 (VBLENDMPSZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
              (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
              (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;

def : Pat<(v8i32 (vselect (v8i1 VK8WM:$mask), (v8i32 VR256X:$src1),
                          (v8i32 VR256X:$src2))),
          (EXTRACT_SUBREG
            (v16i32 (VPBLENDMDZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
              (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
              (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
}
//===----------------------------------------------------------------------===//
// Compare Instructions
//===----------------------------------------------------------------------===//
// avx512_cmp_scalar - AVX512 CMPSS and CMPSD
multiclass avx512_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
                             Operand CC, SDNode OpNode, ValueType VT,
                             PatFrag ld_frag, string asm, string asm_alt> {
  def rr : AVX512Ii8<0xC2, MRMSrcReg,
                (outs VK1:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
                [(set VK1:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))],
                IIC_SSE_ALU_F32S_RR>, EVEX_4V;
  def rm : AVX512Ii8<0xC2, MRMSrcMem,
                (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
                [(set VK1:$dst, (OpNode (VT RC:$src1),
                (ld_frag addr:$src2), imm:$cc))], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
  let isAsmParserOnly = 1, hasSideEffects = 0 in {
    def rri_alt : AVX512Ii8<0xC2, MRMSrcReg,
               (outs VK1:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
               asm_alt, [], IIC_SSE_ALU_F32S_RR>, EVEX_4V;
    def rmi_alt : AVX512Ii8<0xC2, MRMSrcMem,
               (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
               asm_alt, [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
  }
}
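// With the CC operand the condition is folded into the mnemonic (e.g.
// "vcmpltss"); the *_alt forms exist only so the assembler also accepts the
// explicit-immediate spelling, e.g. "vcmpss $1, ..." for the same compare.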
let Predicates = [HasAVX512] in {
defm VCMPSSZ : avx512_cmp_scalar<FR32X, f32mem, AVXCC, X86cmpms, f32, loadf32,
                 "vcmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                 "vcmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
                 XS;
defm VCMPSDZ : avx512_cmp_scalar<FR64X, f64mem, AVXCC, X86cmpms, f64, loadf64,
                 "vcmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                 "vcmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
                 XD, VEX_W;
}
multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                              X86VectorVTInfo _> {
  def rr : AVX512BI<opc, MRMSrcReg,
             (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2)))],
             IIC_SSE_ALU_F32P_RR>, EVEX_4V;
  let mayLoad = 1 in
  def rm : AVX512BI<opc, MRMSrcMem,
             (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
                                       (_.VT (bitconvert (_.LdFrag addr:$src2)))))],
             IIC_SSE_ALU_F32P_RM>, EVEX_4V;
  def rrk : AVX512BI<opc, MRMSrcReg,
              (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
              !strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}}|",
                         "$dst {${mask}}, $src1, $src2}"),
              [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                     (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))))],
              IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
  let mayLoad = 1 in
  def rmk : AVX512BI<opc, MRMSrcMem,
              (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
              !strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}}|",
                         "$dst {${mask}}, $src1, $src2}"),
              [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                     (OpNode (_.VT _.RC:$src1),
                                             (_.VT (bitconvert
                                                    (_.LdFrag addr:$src2))))))],
              IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
}
multiclass avx512_icmp_packed_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                  X86VectorVTInfo _> :
           avx512_icmp_packed<opc, OpcodeStr, OpNode, _> {
  let mayLoad = 1 in {
  def rmb : AVX512BI<opc, MRMSrcMem,
              (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2),
              !strconcat(OpcodeStr, "\t{${src2}", _.BroadcastStr, ", $src1, $dst",
                                    "|$dst, $src1, ${src2}", _.BroadcastStr, "}"),
              [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
                                        (X86VBroadcast (_.ScalarLdFrag addr:$src2))))],
              IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
  def rmbk : AVX512BI<opc, MRMSrcMem,
               (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
                                       _.ScalarMemOp:$src2),
               !strconcat(OpcodeStr,
                          "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
                          "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
               [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                      (OpNode (_.VT _.RC:$src1),
                                              (X86VBroadcast
                                               (_.ScalarLdFrag addr:$src2)))))],
               IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
  }
}
multiclass avx512_icmp_packed_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
  let Predicates = [prd] in
  defm Z : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info512>,
           EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info256>,
                EVEX_V256;
    defm Z128 : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info128>,
                EVEX_V128;
  }
}
multiclass avx512_icmp_packed_rmb_vl<bits<8> opc, string OpcodeStr,
                                     SDNode OpNode, AVX512VLVectorVTInfo VTInfo,
                                     Predicate prd> {
  let Predicates = [prd] in
  defm Z : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info512>,
           EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info256>,
                EVEX_V256;
    defm Z128 : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info128>,
                EVEX_V128;
  }
}
defm VPCMPEQB : avx512_icmp_packed_vl<0x74, "vpcmpeqb", X86pcmpeqm,
                                      avx512vl_i8_info, HasBWI>,
                EVEX_CD8<8, CD8VF>;

defm VPCMPEQW : avx512_icmp_packed_vl<0x75, "vpcmpeqw", X86pcmpeqm,
                                      avx512vl_i16_info, HasBWI>,
                EVEX_CD8<16, CD8VF>;

defm VPCMPEQD : avx512_icmp_packed_rmb_vl<0x76, "vpcmpeqd", X86pcmpeqm,
                                          avx512vl_i32_info, HasAVX512>,
                EVEX_CD8<32, CD8VF>;

defm VPCMPEQQ : avx512_icmp_packed_rmb_vl<0x29, "vpcmpeqq", X86pcmpeqm,
                                          avx512vl_i64_info, HasAVX512>,
                T8PD, VEX_W, EVEX_CD8<64, CD8VF>;

defm VPCMPGTB : avx512_icmp_packed_vl<0x64, "vpcmpgtb", X86pcmpgtm,
                                      avx512vl_i8_info, HasBWI>,
                EVEX_CD8<8, CD8VF>;

defm VPCMPGTW : avx512_icmp_packed_vl<0x65, "vpcmpgtw", X86pcmpgtm,
                                      avx512vl_i16_info, HasBWI>,
                EVEX_CD8<16, CD8VF>;

defm VPCMPGTD : avx512_icmp_packed_rmb_vl<0x66, "vpcmpgtd", X86pcmpgtm,
                                          avx512vl_i32_info, HasAVX512>,
                EVEX_CD8<32, CD8VF>;

defm VPCMPGTQ : avx512_icmp_packed_rmb_vl<0x37, "vpcmpgtq", X86pcmpgtm,
                                          avx512vl_i64_info, HasAVX512>,
                T8PD, VEX_W, EVEX_CD8<64, CD8VF>;
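// Without VLX, 256-bit integer compares are handled by widening the inputs
// to zmm, comparing there, and re-interpreting the resulting mask register
// as v8i1 via COPY_TO_REGCLASS, as in the two patterns below.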
def : Pat<(v8i1 (X86pcmpgtm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
          (COPY_TO_REGCLASS (VPCMPGTDZrr
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;

def : Pat<(v8i1 (X86pcmpeqm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
          (COPY_TO_REGCLASS (VPCMPEQDZrr
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
multiclass avx512_icmp_cc<bits<8> opc, string Suffix, SDNode OpNode,
                          X86VectorVTInfo _> {
  def rri : AVX512AIi8<opc, MRMSrcReg,
             (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, AVXCC:$cc),
             !strconcat("vpcmp${cc}", Suffix,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
                                       imm:$cc))],
             IIC_SSE_ALU_F32P_RR>, EVEX_4V;
  let mayLoad = 1 in
  def rmi : AVX512AIi8<opc, MRMSrcMem,
             (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, AVXCC:$cc),
             !strconcat("vpcmp${cc}", Suffix,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
                                       (_.VT (bitconvert (_.LdFrag addr:$src2))),
                                       imm:$cc))],
             IIC_SSE_ALU_F32P_RM>, EVEX_4V;
  def rrik : AVX512AIi8<opc, MRMSrcReg,
              (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2,
                                      AVXCC:$cc),
              !strconcat("vpcmp${cc}", Suffix,
                         "\t{$src2, $src1, $dst {${mask}}|",
                         "$dst {${mask}}, $src1, $src2}"),
              [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                     (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
                                             imm:$cc)))],
              IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
  let mayLoad = 1 in
  def rmik : AVX512AIi8<opc, MRMSrcMem,
              (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
                                      AVXCC:$cc),
              !strconcat("vpcmp${cc}", Suffix,
                         "\t{$src2, $src1, $dst {${mask}}|",
                         "$dst {${mask}}, $src1, $src2}"),
              [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                     (OpNode (_.VT _.RC:$src1),
                                             (_.VT (bitconvert (_.LdFrag addr:$src2))),
                                             imm:$cc)))],
              IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;

  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1, hasSideEffects = 0 in {
    def rri_alt : AVX512AIi8<opc, MRMSrcReg,
               (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, i8imm:$cc),
               !strconcat("vpcmp", Suffix, "\t{$cc, $src2, $src1, $dst|",
                          "$dst, $src1, $src2, $cc}"),
               [], IIC_SSE_ALU_F32P_RR>, EVEX_4V;
    def rmi_alt : AVX512AIi8<opc, MRMSrcMem,
               (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, i8imm:$cc),
               !strconcat("vpcmp", Suffix, "\t{$cc, $src2, $src1, $dst|",
                          "$dst, $src1, $src2, $cc}"),
               [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
    def rrik_alt : AVX512AIi8<opc, MRMSrcReg,
               (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2,
                                       i8imm:$cc),
               !strconcat("vpcmp", Suffix,
                          "\t{$cc, $src2, $src1, $dst {${mask}}|",
                          "$dst {${mask}}, $src1, $src2, $cc}"),
               [], IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
    def rmik_alt : AVX512AIi8<opc, MRMSrcMem,
               (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
                                       i8imm:$cc),
               !strconcat("vpcmp", Suffix,
                          "\t{$cc, $src2, $src1, $dst {${mask}}|",
                          "$dst {${mask}}, $src1, $src2, $cc}"),
               [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
  }
}
multiclass avx512_icmp_cc_rmb<bits<8> opc, string Suffix, SDNode OpNode,
                              X86VectorVTInfo _> :
           avx512_icmp_cc<opc, Suffix, OpNode, _> {
  let mayLoad = 1 in {
  def rmib : AVX512AIi8<opc, MRMSrcMem,
             (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2,
                                     AVXCC:$cc),
             !strconcat("vpcmp${cc}", Suffix,
                        "\t{${src2}", _.BroadcastStr, ", $src1, $dst|",
                        "$dst, $src1, ${src2}", _.BroadcastStr, "}"),
             [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
                                       (X86VBroadcast (_.ScalarLdFrag addr:$src2)),
                                       imm:$cc))],
             IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
  def rmibk : AVX512AIi8<opc, MRMSrcMem,
              (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
                                      _.ScalarMemOp:$src2, AVXCC:$cc),
              !strconcat("vpcmp${cc}", Suffix,
                         "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
                         "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
              [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                     (OpNode (_.VT _.RC:$src1),
                                             (X86VBroadcast
                                              (_.ScalarLdFrag addr:$src2)),
                                             imm:$cc)))],
              IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
  }

  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1, hasSideEffects = 0 in {
    def rmib_alt : AVX512AIi8<opc, MRMSrcMem,
               (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2,
                                       i8imm:$cc),
               !strconcat("vpcmp", Suffix,
                          "\t{$cc, ${src2}", _.BroadcastStr, ", $src1, $dst|",
                          "$dst, $src1, ${src2}", _.BroadcastStr, ", $cc}"),
               [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
    def rmibk_alt : AVX512AIi8<opc, MRMSrcMem,
               (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
                                       _.ScalarMemOp:$src2, i8imm:$cc),
               !strconcat("vpcmp", Suffix,
                          "\t{$cc, ${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
                          "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, ", $cc}"),
               [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
  }
}
multiclass avx512_icmp_cc_vl<bits<8> opc, string Suffix, SDNode OpNode,
                             AVX512VLVectorVTInfo VTInfo, Predicate prd> {
  let Predicates = [prd] in
  defm Z : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info512>, EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info256>, EVEX_V256;
    defm Z128 : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info128>, EVEX_V128;
  }
}

multiclass avx512_icmp_cc_rmb_vl<bits<8> opc, string Suffix, SDNode OpNode,
                                 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
  let Predicates = [prd] in
  defm Z : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info512>,
           EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info256>,
                EVEX_V256;
    defm Z128 : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info128>,
                EVEX_V128;
  }
}
defm VPCMPB  : avx512_icmp_cc_vl<0x3F, "b", X86cmpm, avx512vl_i8_info,
                                 HasBWI>, EVEX_CD8<8, CD8VF>;
defm VPCMPUB : avx512_icmp_cc_vl<0x3E, "ub", X86cmpmu, avx512vl_i8_info,
                                 HasBWI>, EVEX_CD8<8, CD8VF>;

defm VPCMPW  : avx512_icmp_cc_vl<0x3F, "w", X86cmpm, avx512vl_i16_info,
                                 HasBWI>, VEX_W, EVEX_CD8<16, CD8VF>;
defm VPCMPUW : avx512_icmp_cc_vl<0x3E, "uw", X86cmpmu, avx512vl_i16_info,
                                 HasBWI>, VEX_W, EVEX_CD8<16, CD8VF>;

defm VPCMPD  : avx512_icmp_cc_rmb_vl<0x1F, "d", X86cmpm, avx512vl_i32_info,
                                     HasAVX512>, EVEX_CD8<32, CD8VF>;
defm VPCMPUD : avx512_icmp_cc_rmb_vl<0x1E, "ud", X86cmpmu, avx512vl_i32_info,
                                     HasAVX512>, EVEX_CD8<32, CD8VF>;

defm VPCMPQ  : avx512_icmp_cc_rmb_vl<0x1F, "q", X86cmpm, avx512vl_i64_info,
                                     HasAVX512>, VEX_W, EVEX_CD8<64, CD8VF>;
defm VPCMPUQ : avx512_icmp_cc_rmb_vl<0x1E, "uq", X86cmpmu, avx512vl_i64_info,
                                     HasAVX512>, VEX_W, EVEX_CD8<64, CD8VF>;
// avx512_cmp_packed - compare packed instructions
multiclass avx512_cmp_packed<RegisterClass KRC, RegisterClass RC,
                             X86MemOperand x86memop, ValueType vt,
                             string suffix, Domain d> {
  def rri : AVX512PIi8<0xC2, MRMSrcReg,
             (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
             !strconcat("vcmp${cc}", suffix,
                        " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set KRC:$dst, (X86cmpm (vt RC:$src1), (vt RC:$src2), imm:$cc))], d>;
  def rrib: AVX512PIi8<0xC2, MRMSrcReg,
             (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
             !strconcat("vcmp${cc}", suffix,
                        " \t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
             [], d>;
  def rmi : AVX512PIi8<0xC2, MRMSrcMem,
             (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, AVXCC:$cc),
             !strconcat("vcmp${cc}", suffix,
                        " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set KRC:$dst,
               (X86cmpm (vt RC:$src1), (memop addr:$src2), imm:$cc))], d>;

  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1, hasSideEffects = 0 in {
    def rri_alt : AVX512PIi8<0xC2, MRMSrcReg,
               (outs KRC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
               !strconcat("vcmp", suffix,
                          " \t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
    def rmi_alt : AVX512PIi8<0xC2, MRMSrcMem,
               (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
               !strconcat("vcmp", suffix,
                          " \t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
  }
}
defm VCMPPSZ : avx512_cmp_packed<VK16, VR512, f512mem, v16f32,
                                 "ps", SSEPackedSingle>, PS, EVEX_4V, EVEX_V512,
                                 EVEX_CD8<32, CD8VF>;
defm VCMPPDZ : avx512_cmp_packed<VK8, VR512, f512mem, v8f64,
                                 "pd", SSEPackedDouble>, PD, EVEX_4V, VEX_W, EVEX_V512,
                                 EVEX_CD8<64, CD8VF>;
1380 def : Pat<(v8i1 (X86cmpm (v8f32 VR256X:$src1), (v8f32 VR256X:$src2), imm:$cc)),
1381 (COPY_TO_REGCLASS (VCMPPSZrri
1382 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
1383 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
1384 imm:$cc), VK8)>;
1385 def : Pat<(v8i1 (X86cmpm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
1386 (COPY_TO_REGCLASS (VPCMPDZrri
1387 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
1388 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
1389 imm:$cc), VK8)>;
1390 def : Pat<(v8i1 (X86cmpmu (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
1391 (COPY_TO_REGCLASS (VPCMPUDZrri
1392 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
1393 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
1394 imm:$cc), VK8)>;
1396 def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
1397 (v16f32 VR512:$src2), imm:$cc, (i16 -1),
1398 FROUND_NO_EXC)),
1399 (COPY_TO_REGCLASS (VCMPPSZrrib VR512:$src1, VR512:$src2,
1400 (I8Imm imm:$cc)), GR16)>;
1402 def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
1403 (v8f64 VR512:$src2), imm:$cc, (i8 -1),
1404 FROUND_NO_EXC)),
1405 (COPY_TO_REGCLASS (VCMPPDZrrib VR512:$src1, VR512:$src2,
1406 (I8Imm imm:$cc)), GR8)>;
1408 def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
1409 (v16f32 VR512:$src2), imm:$cc, (i16 -1),
1410 FROUND_CURRENT)),
1411 (COPY_TO_REGCLASS (VCMPPSZrri VR512:$src1, VR512:$src2,
1412 (I8Imm imm:$cc)), GR16)>;
1414 def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
1415 (v8f64 VR512:$src2), imm:$cc, (i8 -1),
1416 FROUND_CURRENT)),
1417 (COPY_TO_REGCLASS (VCMPPDZrri VR512:$src1, VR512:$src2,
1418 (I8Imm imm:$cc)), GR8)>;
1420 // Mask register copy, including
1421 // - copy between mask registers
1422 // - load/store mask registers
1423 // - copy from GPR to mask register and vice versa
1425 multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
1426 string OpcodeStr, RegisterClass KRC,
1427 ValueType vvt, ValueType ivt, X86MemOperand x86memop> {
1428 let hasSideEffects = 0 in {
1429 def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
1430 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
1431 let mayLoad = 1 in
1432 def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
1433 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
1434 [(set KRC:$dst, (vvt (bitconvert (ivt (load addr:$src)))))]>;
1435 let mayStore = 1 in
1436 def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
1437 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
1438 }
1439 }
1441 multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
1442 string OpcodeStr,
1443 RegisterClass KRC, RegisterClass GRC> {
1444 let hasSideEffects = 0 in {
1445 def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src),
1446 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
1447 def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src),
1448 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
1449 }
1450 }
1452 let Predicates = [HasDQI] in
1453 defm KMOVB : avx512_mask_mov<0x90, 0x90, 0x91, "kmovb", VK8, v8i1, i8,
1454 i8mem>, VEX, PD,
1455 avx512_mask_mov_gpr<0x92, 0x93, "kmovb", VK8, GR32>,
1456 VEX, PD;
1458 let Predicates = [HasAVX512] in
1459 defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16,
1460 i16mem>, VEX, PS,
1461 avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
1462 VEX, PS;
1464 let Predicates = [HasBWI] in {
1465 defm KMOVD : avx512_mask_mov<0x90, 0x90, 0x91, "kmovd", VK32, v32i1, i32,
1466 i32mem>, VEX, PD, VEX_W;
1467 defm KMOVD : avx512_mask_mov_gpr<0x92, 0x93, "kmovd", VK32, GR32>,
1468 VEX, XD;
1469 }
1471 let Predicates = [HasBWI] in {
1472 defm KMOVQ : avx512_mask_mov<0x90, 0x90, 0x91, "kmovq", VK64, v64i1, i64,
1473 i64mem>, VEX, PS, VEX_W;
1474 defm KMOVQ : avx512_mask_mov_gpr<0x92, 0x93, "kmovq", VK64, GR64>,
1475 VEX, XD, VEX_W;
1476 }
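// Naming note: within avx512_mask_mov the record suffixes encode the operand
// kinds, e.g. KMOVWkk is mask<-mask, KMOVWkm is mask<-mem and KMOVWmk is
// mem<-mask; the _gpr variants add KMOVWkr (mask<-GR32) and KMOVWrk
// (GR32<-mask), which the bitconvert patterns below rely on.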
1478 // GR from/to mask register
1479 let Predicates = [HasDQI] in {
1480 def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
1481 (KMOVBkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit))>;
1482 def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
1483 (EXTRACT_SUBREG (KMOVBrk VK8:$src), sub_8bit)>;
1484 }
1485 let Predicates = [HasAVX512] in {
1486 def : Pat<(v16i1 (bitconvert (i16 GR16:$src))),
1487 (KMOVWkr (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit))>;
1488 def : Pat<(i16 (bitconvert (v16i1 VK16:$src))),
1489 (EXTRACT_SUBREG (KMOVWrk VK16:$src), sub_16bit)>;
1490 }
1491 let Predicates = [HasBWI] in {
1492 def : Pat<(v32i1 (bitconvert (i32 GR32:$src))), (KMOVDkr GR32:$src)>;
1493 def : Pat<(i32 (bitconvert (v32i1 VK32:$src))), (KMOVDrk VK32:$src)>;
1494 }
1495 let Predicates = [HasBWI] in {
1496 def : Pat<(v64i1 (bitconvert (i64 GR64:$src))), (KMOVQkr GR64:$src)>;
1497 def : Pat<(i64 (bitconvert (v64i1 VK64:$src))), (KMOVQrk VK64:$src)>;
1498 }
1501 let Predicates = [HasDQI] in {
1502 def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst),
1503 (KMOVBmk addr:$dst, VK8:$src)>;
1504 }
1505 let Predicates = [HasAVX512] in {
1506 def : Pat<(store (i16 (bitconvert (v16i1 VK16:$src))), addr:$dst),
1507 (KMOVWmk addr:$dst, VK16:$src)>;
1508 def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst),
1509 (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK8:$src, VK16))>;
1510 def : Pat<(i1 (load addr:$src)),
1511 (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK1)>;
1512 def : Pat<(v8i1 (bitconvert (i8 (load addr:$src)))),
1513 (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK8)>;
1514 }
1515 let Predicates = [HasBWI] in {
1516 def : Pat<(store (i32 (bitconvert (v32i1 VK32:$src))), addr:$dst),
1517 (KMOVDmk addr:$dst, VK32:$src)>;
1518 }
1519 let Predicates = [HasBWI] in {
1520 def : Pat<(store (i64 (bitconvert (v64i1 VK64:$src))), addr:$dst),
1521 (KMOVQmk addr:$dst, VK64:$src)>;
1522 }
1524 let Predicates = [HasAVX512] in {
1525 def : Pat<(i1 (trunc (i64 GR64:$src))),
1526 (COPY_TO_REGCLASS (KMOVWkr (AND32ri (EXTRACT_SUBREG $src, sub_32bit),
1527 (i32 1))), VK1)>;
1529 def : Pat<(i1 (trunc (i32 GR32:$src))),
1530 (COPY_TO_REGCLASS (KMOVWkr (AND32ri $src, (i32 1))), VK1)>;
1532 def : Pat<(i1 (trunc (i8 GR8:$src))),
1533 (COPY_TO_REGCLASS
1534 (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit), (i32 1))),
1535 VK1)>;
1536 def : Pat<(i1 (trunc (i16 GR16:$src))),
1537 (COPY_TO_REGCLASS
1538 (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), $src, sub_16bit), (i32 1))),
1539 VK1)>;
1541 def : Pat<(i32 (zext VK1:$src)),
1542 (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1))>;
1543 def : Pat<(i8 (zext VK1:$src)),
1544 (EXTRACT_SUBREG
1545 (AND32ri (KMOVWrk
1546 (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)), sub_8bit)>;
1547 def : Pat<(i64 (zext VK1:$src)),
1548 (AND64ri8 (SUBREG_TO_REG (i64 0),
1549 (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), sub_32bit), (i64 1))>;
1550 def : Pat<(i16 (zext VK1:$src)),
1551 (EXTRACT_SUBREG
1552 (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)),
1553 sub_16bit)>;
1554 def : Pat<(v16i1 (scalar_to_vector VK1:$src)),
1555 (COPY_TO_REGCLASS VK1:$src, VK16)>;
1556 def : Pat<(v8i1 (scalar_to_vector VK1:$src)),
1557 (COPY_TO_REGCLASS VK1:$src, VK8)>;
1558 }
1559 let Predicates = [HasBWI] in {
1560 def : Pat<(v32i1 (scalar_to_vector VK1:$src)),
1561 (COPY_TO_REGCLASS VK1:$src, VK32)>;
1562 def : Pat<(v64i1 (scalar_to_vector VK1:$src)),
1563 (COPY_TO_REGCLASS VK1:$src, VK64)>;
1564 }
1567 // With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
1568 let Predicates = [HasAVX512] in {
1569 // GR from/to 8-bit mask without native support
1570 def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
1571 (COPY_TO_REGCLASS
1572 (KMOVWkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
1573 VK8)>;
1574 def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
1575 (EXTRACT_SUBREG
1576 (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
1577 sub_8bit)>;
1579 def : Pat<(i1 (X86Vextract VK16:$src, (iPTR 0))),
1580 (COPY_TO_REGCLASS VK16:$src, VK1)>;
1581 def : Pat<(i1 (X86Vextract VK8:$src, (iPTR 0))),
1582 (COPY_TO_REGCLASS VK8:$src, VK1)>;
1583 }
1584 let Predicates = [HasBWI] in {
1585 def : Pat<(i1 (X86Vextract VK32:$src, (iPTR 0))),
1586 (COPY_TO_REGCLASS VK32:$src, VK1)>;
1587 def : Pat<(i1 (X86Vextract VK64:$src, (iPTR 0))),
1588 (COPY_TO_REGCLASS VK64:$src, VK1)>;
1589 }
1591 // Mask unary operation
1592 // - KNOT
1593 multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr,
1594 RegisterClass KRC, SDPatternOperator OpNode,
1595 Predicate prd> {
1596 let Predicates = [prd] in
1597 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
1598 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
1599 [(set KRC:$dst, (OpNode KRC:$src))]>;
1600 }
1602 multiclass avx512_mask_unop_all<bits<8> opc, string OpcodeStr,
1603 SDPatternOperator OpNode> {
1604 defm B : avx512_mask_unop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
1605 HasDQI>, VEX, PD;
1606 defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
1607 HasAVX512>, VEX, PS;
1608 defm D : avx512_mask_unop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
1609 HasBWI>, VEX, PD, VEX_W;
1610 defm Q : avx512_mask_unop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
1611 HasBWI>, VEX, PS, VEX_W;
1612 }
1614 defm KNOT : avx512_mask_unop_all<0x44, "knot", not>;
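// knot{b,w,d,q} complements every bit of the source mask; the patterns below
// map both the generic 'not' and the xor-with-all-ones idiom onto it.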
1616 multiclass avx512_mask_unop_int<string IntName, string InstName> {
1617 let Predicates = [HasAVX512] in
1618 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
1619 (i16 GR16:$src)),
1620 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
1621 (v16i1 (COPY_TO_REGCLASS GR16:$src, VK16))), GR16)>;
1622 }
1623 defm : avx512_mask_unop_int<"knot", "KNOT">;
1625 let Predicates = [HasDQI] in
1626 def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)), (KNOTBrr VK8:$src1)>;
1627 let Predicates = [HasAVX512] in
1628 def : Pat<(xor VK16:$src1, (v16i1 immAllOnesV)), (KNOTWrr VK16:$src1)>;
1629 let Predicates = [HasBWI] in
1630 def : Pat<(xor VK32:$src1, (v32i1 immAllOnesV)), (KNOTDrr VK32:$src1)>;
1631 let Predicates = [HasBWI] in
1632 def : Pat<(xor VK64:$src1, (v64i1 immAllOnesV)), (KNOTQrr VK64:$src1)>;
1634 // KNL does not support KMOVB, 8-bit mask is promoted to 16-bit
1635 let Predicates = [HasAVX512] in {
1636 def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)),
1637 (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$src1, VK16)), VK8)>;
1639 def : Pat<(not VK8:$src),
1640 (COPY_TO_REGCLASS
1641 (KNOTWrr (COPY_TO_REGCLASS VK8:$src, VK16)), VK8)>;
1642 }
1644 // Mask binary operation
1645 // - KAND, KANDN, KOR, KXNOR, KXOR
1646 multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr,
1647 RegisterClass KRC, SDPatternOperator OpNode,
1648 Predicate prd> {
1649 let Predicates = [prd] in
1650 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
1651 !strconcat(OpcodeStr,
1652 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1653 [(set KRC:$dst, (OpNode KRC:$src1, KRC:$src2))]>;
1654 }
1656 multiclass avx512_mask_binop_all<bits<8> opc, string OpcodeStr,
1657 SDPatternOperator OpNode> {
1658 defm B : avx512_mask_binop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
1659 HasDQI>, VEX_4V, VEX_L, PD;
1660 defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
1661 HasAVX512>, VEX_4V, VEX_L, PS;
1662 defm D : avx512_mask_binop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
1663 HasBWI>, VEX_4V, VEX_L, VEX_W, PD;
1664 defm Q : avx512_mask_binop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
1665 HasBWI>, VEX_4V, VEX_L, VEX_W, PS;
1666 }
1668 def andn : PatFrag<(ops node:$i0, node:$i1), (and (not node:$i0), node:$i1)>;
1669 def xnor : PatFrag<(ops node:$i0, node:$i1), (not (xor node:$i0, node:$i1))>;
1671 let isCommutable = 1 in {
1672 defm KAND : avx512_mask_binop_all<0x41, "kand", and>;
1673 defm KOR : avx512_mask_binop_all<0x45, "kor", or>;
1674 defm KXNOR : avx512_mask_binop_all<0x46, "kxnor", xnor>;
1675 defm KXOR : avx512_mask_binop_all<0x47, "kxor", xor>;
1676 }
1677 let isCommutable = 0 in
1678 defm KANDN : avx512_mask_binop_all<0x42, "kandn", andn>;
1680 def : Pat<(xor VK1:$src1, VK1:$src2),
1681 (COPY_TO_REGCLASS (KXORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1682 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
1684 def : Pat<(or VK1:$src1, VK1:$src2),
1685 (COPY_TO_REGCLASS (KORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1686 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
1688 def : Pat<(and VK1:$src1, VK1:$src2),
1689 (COPY_TO_REGCLASS (KANDWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1690 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
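// Single-bit VK1 operands have no k-instruction of their own, so the three
// patterns above widen them to VK16 and reuse the word-sized
// KXORW/KORW/KANDW forms, mirroring the 8-bit-to-16-bit promotion used
// elsewhere in this file.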
1692 multiclass avx512_mask_binop_int<string IntName, string InstName> {
1693 let Predicates = [HasAVX512] in
1694 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
1695 (i16 GR16:$src1), (i16 GR16:$src2)),
1696 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
1697 (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
1698 (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
1699 }
1701 defm : avx512_mask_binop_int<"kand", "KAND">;
1702 defm : avx512_mask_binop_int<"kandn", "KANDN">;
1703 defm : avx512_mask_binop_int<"kor", "KOR">;
1704 defm : avx512_mask_binop_int<"kxnor", "KXNOR">;
1705 defm : avx512_mask_binop_int<"kxor", "KXOR">;
1707 // With AVX-512, 8-bit mask is promoted to 16-bit mask.
1708 multiclass avx512_binop_pat<SDPatternOperator OpNode, Instruction Inst> {
1709 let Predicates = [HasAVX512] in
1710 def : Pat<(OpNode VK8:$src1, VK8:$src2),
1711 (COPY_TO_REGCLASS
1712 (Inst (COPY_TO_REGCLASS VK8:$src1, VK16),
1713 (COPY_TO_REGCLASS VK8:$src2, VK16)), VK8)>;
1714 }
1716 defm : avx512_binop_pat<and, KANDWrr>;
1717 defm : avx512_binop_pat<andn, KANDNWrr>;
1718 defm : avx512_binop_pat<or, KORWrr>;
1719 defm : avx512_binop_pat<xnor, KXNORWrr>;
1720 defm : avx512_binop_pat<xor, KXORWrr>;
1722 // Mask unpacking
1723 multiclass avx512_mask_unpck<bits<8> opc, string OpcodeStr,
1724 RegisterClass KRC> {
1725 let Predicates = [HasAVX512] in
1726 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
1727 !strconcat(OpcodeStr,
1728 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1729 }
1731 multiclass avx512_mask_unpck_bw<bits<8> opc, string OpcodeStr> {
1732 defm BW : avx512_mask_unpck<opc, !strconcat(OpcodeStr, "bw"), VK16>,
1733 VEX_4V, VEX_L, PD;
1734 }
1736 defm KUNPCK : avx512_mask_unpck_bw<0x4b, "kunpck">;
1737 def : Pat<(v16i1 (concat_vectors (v8i1 VK8:$src1), (v8i1 VK8:$src2))),
1738 (KUNPCKBWrr (COPY_TO_REGCLASS VK8:$src2, VK16),
1739 (COPY_TO_REGCLASS VK8:$src1, VK16))>;
1742 multiclass avx512_mask_unpck_int<string IntName, string InstName> {
1743 let Predicates = [HasAVX512] in
1744 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_bw")
1745 (i16 GR16:$src1), (i16 GR16:$src2)),
1746 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"BWrr")
1747 (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
1748 (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
1749 }
1750 defm : avx512_mask_unpck_int<"kunpck", "KUNPCK">;
1752 // Mask bit testing
1753 multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
1754 SDNode OpNode> {
1755 let Predicates = [HasAVX512], Defs = [EFLAGS] in
1756 def rr : I<opc, MRMSrcReg, (outs), (ins KRC:$src1, KRC:$src2),
1757 !strconcat(OpcodeStr, " \t{$src2, $src1|$src1, $src2}"),
1758 [(set EFLAGS, (OpNode KRC:$src1, KRC:$src2))]>;
1759 }
1761 multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1762 defm W : avx512_mask_testop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
1763 VEX, PS;
1764 }
1766 defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>;
1768 def : Pat<(X86cmp VK1:$src1, (i1 0)),
1769 (KORTESTWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1770 (COPY_TO_REGCLASS VK1:$src1, VK16))>;
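// kortest sets ZF when the OR of the two source masks is all zeroes, so
// OR-ing a mask with itself (as in the pattern above) is effectively a
// compare of that mask against zero.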
1773 multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
1774 SDNode OpNode> {
1775 let Predicates = [HasAVX512] in
1776 def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, i8imm:$imm),
1777 !strconcat(OpcodeStr,
1778 " \t{$imm, $src, $dst|$dst, $src, $imm}"),
1779 [(set KRC:$dst, (OpNode KRC:$src, (i8 imm:$imm)))]>;
1780 }
1782 multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
1783 SDNode OpNode> {
1784 defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
1785 VEX, TAPD, VEX_W;
1786 }
1788 defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", X86vshli>;
1789 defm KSHIFTR : avx512_mask_shiftop_w<0x30, 0x31, "kshiftr", X86vsrli>;
1791 // Mask setting all 0s or 1s
1792 multiclass avx512_mask_setop<RegisterClass KRC, ValueType VT, PatFrag Val> {
1793 let Predicates = [HasAVX512] in
1794 let isReMaterializable = 1, isAsCheapAsAMove = 1, isPseudo = 1 in
1795 def #NAME# : I<0, Pseudo, (outs KRC:$dst), (ins), "",
1796 [(set KRC:$dst, (VT Val))]>;
1797 }
1799 multiclass avx512_mask_setop_w<PatFrag Val> {
1800 defm B : avx512_mask_setop<VK8, v8i1, Val>;
1801 defm W : avx512_mask_setop<VK16, v16i1, Val>;
1802 }
1804 defm KSET0 : avx512_mask_setop_w<immAllZerosV>;
1805 defm KSET1 : avx512_mask_setop_w<immAllOnesV>;
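// KSET0* and KSET1* are pseudos (isPseudo = 1 above); being re-materializable
// and as cheap as a move, they are expanded to real mask instructions after
// register allocation (an assumption here: typically a kxor/kxnor idiom).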
1807 // With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
1808 let Predicates = [HasAVX512] in {
1809 def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
1810 def : Pat<(v8i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK8)>;
1811 def : Pat<(i1 0), (COPY_TO_REGCLASS (KSET0W), VK1)>;
1812 def : Pat<(i1 1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
1813 def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
1814 }
1815 def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
1816 (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;
1818 def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
1819 (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>;
1821 def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
1822 (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
1824 let Predicates = [HasVLX] in {
1825 def : Pat<(v8i1 (insert_subvector undef, (v4i1 VK4:$src), (iPTR 0))),
1826 (v8i1 (COPY_TO_REGCLASS VK4:$src, VK8))>;
1827 def : Pat<(v8i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))),
1828 (v8i1 (COPY_TO_REGCLASS VK2:$src, VK8))>;
1829 def : Pat<(v4i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
1830 (v4i1 (COPY_TO_REGCLASS VK8:$src, VK4))>;
1831 def : Pat<(v2i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
1832 (v2i1 (COPY_TO_REGCLASS VK8:$src, VK2))>;
1833 }
1835 def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
1836 (v8i1 (COPY_TO_REGCLASS (KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;
1838 def : Pat<(v8i1 (X86vsrli VK8:$src, (i8 imm:$imm))),
1839 (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;
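// As with the other v8i1 operations, shifts of an 8-bit mask are widened to
// VK16 and performed with the word-sized KSHIFTLWri/KSHIFTRWri, then copied
// back to VK8.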
1840 //===----------------------------------------------------------------------===//
1841 // AVX-512 - Aligned and unaligned load and store
1842 //===----------------------------------------------------------------------===//
1844 multiclass avx512_load<bits<8> opc, string OpcodeStr, PatFrag ld_frag,
1845 RegisterClass KRC, RegisterClass RC,
1846 ValueType vt, ValueType zvt, X86MemOperand memop,
1847 Domain d, bit IsReMaterializable = 1> {
1848 let hasSideEffects = 0 in {
1849 def rr : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
1850 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [],
1851 d>, EVEX;
1852 def rrkz : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
1853 !strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|",
1854 "${dst} {${mask}} {z}, $src}"), [], d>, EVEX, EVEX_KZ;
1856 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable,
1857 SchedRW = [WriteLoad] in
1858 def rm : AVX512PI<opc, MRMSrcMem, (outs RC:$dst), (ins memop:$src),
1859 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
1860 [(set RC:$dst, (vt (bitconvert (ld_frag addr:$src))))],
1861 d>, EVEX;
1862 }
1863 let AddedComplexity = 20 in {
1864 let Constraints = "$src0 = $dst", hasSideEffects = 0 in {
1866 def rrk : AVX512PI<opc, MRMSrcReg, (outs RC:$dst),
1867 (ins RC:$src0, KRC:$mask, RC:$src1),
1868 !strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
1869 "${dst} {${mask}}, $src1}"),
1870 [(set RC:$dst, (vt (vselect KRC:$mask,
1871 (vt RC:$src1),
1872 (vt RC:$src0))))], d>,
1873 EVEX, EVEX_K;
1874 let mayLoad = 1, SchedRW = [WriteLoad] in
1875 def rmk : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
1876 (ins RC:$src0, KRC:$mask, memop:$src1),
1877 !strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
1878 "${dst} {${mask}}, $src1}"),
1879 [(set RC:$dst, (vt
1880 (vselect KRC:$mask,
1881 (vt (bitconvert (ld_frag addr:$src1))),
1882 (vt RC:$src0))))], d>, EVEX, EVEX_K;
1883 }
1885 let mayLoad = 1, SchedRW = [WriteLoad] in
1886 def rmkz : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
1887 (ins KRC:$mask, memop:$src),
1888 !strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|",
1889 "${dst} {${mask}} {z}, $src}"),
1890 [(set RC:$dst, (vt
1891 (vselect KRC:$mask,
1892 (vt (bitconvert (ld_frag addr:$src))),
1893 (vt (bitconvert (zvt immAllZerosV))))))],
1894 d>, EVEX, EVEX_KZ;
1895 }
1896 }
1898 multiclass avx512_load_vl<bits<8> opc, string OpcodeStr, string ld_pat,
1899 string elty, string elsz, string vsz512,
1900 string vsz256, string vsz128, Domain d,
1901 Predicate prd, bit IsReMaterializable = 1> {
1902 let Predicates = [prd] in
1903 defm Z : avx512_load<opc, OpcodeStr,
1904 !cast<PatFrag>(ld_pat##"v"##vsz512##elty##elsz),
1905 !cast<RegisterClass>("VK"##vsz512##"WM"), VR512,
1906 !cast<ValueType>("v"##vsz512##elty##elsz), v16i32,
1907 !cast<X86MemOperand>(elty##"512mem"), d,
1908 IsReMaterializable>, EVEX_V512;
1910 let Predicates = [prd, HasVLX] in {
1911 defm Z256 : avx512_load<opc, OpcodeStr,
1912 !cast<PatFrag>(ld_pat##!if(!eq(elty,"f"),
1913 "v"##vsz256##elty##elsz, "v4i64")),
1914 !cast<RegisterClass>("VK"##vsz256##"WM"), VR256X,
1915 !cast<ValueType>("v"##vsz256##elty##elsz), v8i32,
1916 !cast<X86MemOperand>(elty##"256mem"), d,
1917 IsReMaterializable>, EVEX_V256;
1919 defm Z128 : avx512_load<opc, OpcodeStr,
1920 !cast<PatFrag>(ld_pat##!if(!eq(elty,"f"),
1921 "v"##vsz128##elty##elsz, "v2i64")),
1922 !cast<RegisterClass>("VK"##vsz128##"WM"), VR128X,
1923 !cast<ValueType>("v"##vsz128##elty##elsz), v4i32,
1924 !cast<X86MemOperand>(elty##"128mem"), d,
1925 IsReMaterializable>, EVEX_V128;
1926 }
1927 }
1930 multiclass avx512_store<bits<8> opc, string OpcodeStr, PatFrag st_frag,
1931 ValueType OpVT, RegisterClass KRC, RegisterClass RC,
1932 X86MemOperand memop, Domain d> {
1933 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1934 def rr_alt : AVX512PI<opc, MRMDestReg, (outs RC:$dst), (ins RC:$src),
1935 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [], d>,
1936 EVEX;
1937 let Constraints = "$src1 = $dst" in
1938 def rrk_alt : AVX512PI<opc, MRMDestReg, (outs RC:$dst),
1939 (ins RC:$src1, KRC:$mask, RC:$src2),
1940 !strconcat(OpcodeStr,
1941 "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"), [], d>,
1942 EVEX, EVEX_K;
1943 def rrkz_alt : AVX512PI<opc, MRMDestReg, (outs RC:$dst),
1944 (ins KRC:$mask, RC:$src),
1945 !strconcat(OpcodeStr,
1946 "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
1947 [], d>, EVEX, EVEX_KZ;
1948 }
1949 let mayStore = 1 in {
1950 def mr : AVX512PI<opc, MRMDestMem, (outs), (ins memop:$dst, RC:$src),
1951 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
1952 [(st_frag (OpVT RC:$src), addr:$dst)], d>, EVEX;
1953 def mrk : AVX512PI<opc, MRMDestMem, (outs),
1954 (ins memop:$dst, KRC:$mask, RC:$src),
1955 !strconcat(OpcodeStr,
1956 "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
1957 [], d>, EVEX, EVEX_K;
1958 }
1959 }
1962 multiclass avx512_store_vl<bits<8> opc, string OpcodeStr, string st_pat,
1963 string st_suff_512, string st_suff_256,
1964 string st_suff_128, string elty, string elsz,
1965 string vsz512, string vsz256, string vsz128,
1966 Domain d, Predicate prd> {
1967 let Predicates = [prd] in
1968 defm Z : avx512_store<opc, OpcodeStr, !cast<PatFrag>(st_pat##st_suff_512),
1969 !cast<ValueType>("v"##vsz512##elty##elsz),
1970 !cast<RegisterClass>("VK"##vsz512##"WM"), VR512,
1971 !cast<X86MemOperand>(elty##"512mem"), d>, EVEX_V512;
1973 let Predicates = [prd, HasVLX] in {
1974 defm Z256 : avx512_store<opc, OpcodeStr, !cast<PatFrag>(st_pat##st_suff_256),
1975 !cast<ValueType>("v"##vsz256##elty##elsz),
1976 !cast<RegisterClass>("VK"##vsz256##"WM"), VR256X,
1977 !cast<X86MemOperand>(elty##"256mem"), d>, EVEX_V256;
1979 defm Z128 : avx512_store<opc, OpcodeStr, !cast<PatFrag>(st_pat##st_suff_128),
1980 !cast<ValueType>("v"##vsz128##elty##elsz),
1981 !cast<RegisterClass>("VK"##vsz128##"WM"), VR128X,
1982 !cast<X86MemOperand>(elty##"128mem"), d>, EVEX_V128;
1983 }
1984 }
1986 defm VMOVAPS : avx512_load_vl<0x28, "vmovaps", "alignedload", "f", "32",
1987 "16", "8", "4", SSEPackedSingle, HasAVX512>,
1988 avx512_store_vl<0x29, "vmovaps", "alignedstore",
1989 "512", "256", "", "f", "32", "16", "8", "4",
1990 SSEPackedSingle, HasAVX512>,
1991 PS, EVEX_CD8<32, CD8VF>;
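// How the string pasting resolves for the Z (512-bit) variant of VMOVAPS:
// ld_pat##"v"##vsz512##elty##elsz yields the alignedloadv16f32 PatFrag,
// "VK"##vsz512##"WM" yields VK16WM, and elty##"512mem" yields f512mem.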
1993 defm VMOVAPD : avx512_load_vl<0x28, "vmovapd", "alignedload", "f", "64",
1994 "8", "4", "2", SSEPackedDouble, HasAVX512>,
1995 avx512_store_vl<0x29, "vmovapd", "alignedstore",
1996 "512", "256", "", "f", "64", "8", "4", "2",
1997 SSEPackedDouble, HasAVX512>,
1998 PD, VEX_W, EVEX_CD8<64, CD8VF>;
2000 defm VMOVUPS : avx512_load_vl<0x10, "vmovups", "load", "f", "32",
2001 "16", "8", "4", SSEPackedSingle, HasAVX512>,
2002 avx512_store_vl<0x11, "vmovups", "store", "", "", "", "f", "32",
2003 "16", "8", "4", SSEPackedSingle, HasAVX512>,
2004 PS, EVEX_CD8<32, CD8VF>;
2006 defm VMOVUPD : avx512_load_vl<0x10, "vmovupd", "load", "f", "64",
2007 "8", "4", "2", SSEPackedDouble, HasAVX512, 0>,
2008 avx512_store_vl<0x11, "vmovupd", "store", "", "", "", "f", "64",
2009 "8", "4", "2", SSEPackedDouble, HasAVX512>,
2010 PD, VEX_W, EVEX_CD8<64, CD8VF>;
2012 def: Pat<(v8f64 (int_x86_avx512_mask_loadu_pd_512 addr:$ptr,
2013 (bc_v8f64 (v16i32 immAllZerosV)), GR8:$mask)),
2014 (VMOVUPDZrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
2016 def: Pat<(v16f32 (int_x86_avx512_mask_loadu_ps_512 addr:$ptr,
2017 (bc_v16f32 (v16i32 immAllZerosV)), GR16:$mask)),
2018 (VMOVUPSZrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
2020 def: Pat<(int_x86_avx512_mask_storeu_ps_512 addr:$ptr, (v16f32 VR512:$src),
2021 GR16:$mask),
2022 (VMOVUPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
2023 VR512:$src)>;
2024 def: Pat<(int_x86_avx512_mask_storeu_pd_512 addr:$ptr, (v8f64 VR512:$src),
2025 GR8:$mask),
2026 (VMOVUPDZmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
2027 VR512:$src)>;
2029 defm VMOVDQA32 : avx512_load_vl<0x6F, "vmovdqa32", "alignedload", "i", "32",
2030 "16", "8", "4", SSEPackedInt, HasAVX512>,
2031 avx512_store_vl<0x7F, "vmovdqa32", "alignedstore",
2032 "512", "256", "", "i", "32", "16", "8", "4",
2033 SSEPackedInt, HasAVX512>,
2034 PD, EVEX_CD8<32, CD8VF>;
2036 defm VMOVDQA64 : avx512_load_vl<0x6F, "vmovdqa64", "alignedload", "i", "64",
2037 "8", "4", "2", SSEPackedInt, HasAVX512>,
2038 avx512_store_vl<0x7F, "vmovdqa64", "alignedstore",
2039 "512", "256", "", "i", "64", "8", "4", "2",
2040 SSEPackedInt, HasAVX512>,
2041 PD, VEX_W, EVEX_CD8<64, CD8VF>;
2043 defm VMOVDQU8 : avx512_load_vl<0x6F, "vmovdqu8", "load", "i", "8",
2044 "64", "32", "16", SSEPackedInt, HasBWI>,
2045 avx512_store_vl<0x7F, "vmovdqu8", "store", "", "", "",
2046 "i", "8", "64", "32", "16", SSEPackedInt,
2047 HasBWI>, XD, EVEX_CD8<8, CD8VF>;
2049 defm VMOVDQU16 : avx512_load_vl<0x6F, "vmovdqu16", "load", "i", "16",
2050 "32", "16", "8", SSEPackedInt, HasBWI>,
2051 avx512_store_vl<0x7F, "vmovdqu16", "store", "", "", "",
2052 "i", "16", "32", "16", "8", SSEPackedInt,
2053 HasBWI>, XD, VEX_W, EVEX_CD8<16, CD8VF>;
2055 defm VMOVDQU32 : avx512_load_vl<0x6F, "vmovdqu32", "load", "i", "32",
2056 "16", "8", "4", SSEPackedInt, HasAVX512>,
2057 avx512_store_vl<0x7F, "vmovdqu32", "store", "", "", "",
2058 "i", "32", "16", "8", "4", SSEPackedInt,
2059 HasAVX512>, XS, EVEX_CD8<32, CD8VF>;
2061 defm VMOVDQU64 : avx512_load_vl<0x6F, "vmovdqu64", "load", "i", "64",
2062 "8", "4", "2", SSEPackedInt, HasAVX512>,
2063 avx512_store_vl<0x7F, "vmovdqu64", "store", "", "", "",
2064 "i", "64", "8", "4", "2", SSEPackedInt,
2065 HasAVX512>, XS, VEX_W, EVEX_CD8<64, CD8VF>;
2067 def: Pat<(v16i32 (int_x86_avx512_mask_loadu_d_512 addr:$ptr,
2068 (v16i32 immAllZerosV), GR16:$mask)),
2069 (VMOVDQU32Zrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
2071 def: Pat<(v8i64 (int_x86_avx512_mask_loadu_q_512 addr:$ptr,
2072 (bc_v8i64 (v16i32 immAllZerosV)), GR8:$mask)),
2073 (VMOVDQU64Zrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
2075 def: Pat<(int_x86_avx512_mask_storeu_d_512 addr:$ptr, (v16i32 VR512:$src),
2076 GR16:$mask),
2077 (VMOVDQU32Zmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
2078 VR512:$src)>;
2079 def: Pat<(int_x86_avx512_mask_storeu_q_512 addr:$ptr, (v8i64 VR512:$src),
2080 GR8:$mask),
2081 (VMOVDQU64Zmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
2082 VR512:$src)>;
2084 let AddedComplexity = 20 in {
2085 def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src),
2086 (bc_v8i64 (v16i32 immAllZerosV)))),
2087 (VMOVDQU64Zrrkz VK8WM:$mask, VR512:$src)>;
2089 def : Pat<(v8i64 (vselect VK8WM:$mask, (bc_v8i64 (v16i32 immAllZerosV)),
2090 (v8i64 VR512:$src))),
2091 (VMOVDQU64Zrrkz (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$mask, VK16)),
2092 VK8), VR512:$src)>;
2094 def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 VR512:$src),
2095 (v16i32 immAllZerosV))),
2096 (VMOVDQU32Zrrkz VK16WM:$mask, VR512:$src)>;
2098 def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 immAllZerosV),
2099 (v16i32 VR512:$src))),
2100 (VMOVDQU32Zrrkz (KNOTWrr VK16WM:$mask), VR512:$src)>;
2101 }
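// The vselect patterns above handle both operand orders: when the zero
// vector is on the "true" side, the mask is inverted with KNOTWrr rather
// than adding a separate instruction form.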
2103 // Move Int Doubleword to Packed Double Int
2105 def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
2106 "vmovd\t{$src, $dst|$dst, $src}",
2107 [(set VR128X:$dst,
2108 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
2109 EVEX, VEX_LIG;
2110 def VMOVDI2PDIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src),
2111 "vmovd\t{$src, $dst|$dst, $src}",
2112 [(set VR128X:$dst,
2113 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
2114 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2115 def VMOV64toPQIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$src),
2116 "vmovq\t{$src, $dst|$dst, $src}",
2117 [(set VR128X:$dst,
2118 (v2i64 (scalar_to_vector GR64:$src)))],
2119 IIC_SSE_MOVDQ>, EVEX, VEX_W, VEX_LIG;
2120 let isCodeGenOnly = 1 in {
2121 def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
2122 "vmovq\t{$src, $dst|$dst, $src}",
2123 [(set FR64:$dst, (bitconvert GR64:$src))],
2124 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
2125 def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
2126 "vmovq\t{$src, $dst|$dst, $src}",
2127 [(set GR64:$dst, (bitconvert FR64:$src))],
2128 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
2130 def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
2131 "vmovq\t{$src, $dst|$dst, $src}",
2132 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
2133 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteStore]>,
2134 EVEX_CD8<64, CD8VT1>;
2135 }
2136 // Move Int Doubleword to Single Scalar
2138 let isCodeGenOnly = 1 in {
2139 def VMOVDI2SSZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src),
2140 "vmovd\t{$src, $dst|$dst, $src}",
2141 [(set FR32X:$dst, (bitconvert GR32:$src))],
2142 IIC_SSE_MOVDQ>, EVEX, VEX_LIG;
2144 def VMOVDI2SSZrm : AVX512BI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src),
2145 "vmovd\t{$src, $dst|$dst, $src}",
2146 [(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))],
2147 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2148 }
2150 // Move doubleword from xmm register to r/m32
2152 def VMOVPDI2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128X:$src),
2153 "vmovd\t{$src, $dst|$dst, $src}",
2154 [(set GR32:$dst, (vector_extract (v4i32 VR128X:$src),
2155 (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
2156 EVEX, VEX_LIG;
2157 def VMOVPDI2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
2158 (ins i32mem:$dst, VR128X:$src),
2159 "vmovd\t{$src, $dst|$dst, $src}",
2160 [(store (i32 (vector_extract (v4i32 VR128X:$src),
2161 (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
2162 EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2164 // Move quadword from xmm1 register to r/m64
2166 def VMOVPQIto64Zrr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128X:$src),
2167 "vmovq\t{$src, $dst|$dst, $src}",
2168 [(set GR64:$dst, (extractelt (v2i64 VR128X:$src),
2169 (iPTR 0)))],
2170 IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_LIG, VEX_W,
2171 Requires<[HasAVX512, In64BitMode]>;
2173 def VMOVPQIto64Zmr : I<0xD6, MRMDestMem, (outs),
2174 (ins i64mem:$dst, VR128X:$src),
2175 "vmovq\t{$src, $dst|$dst, $src}",
2176 [(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
2177 addr:$dst)], IIC_SSE_MOVDQ>,
2178 EVEX, PD, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>,
2179 Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>;
2181 // Move Scalar Single to Double Int
2183 let isCodeGenOnly = 1 in {
2184 def VMOVSS2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst),
2186 "vmovd\t{$src, $dst|$dst, $src}",
2187 [(set GR32:$dst, (bitconvert FR32X:$src))],
2188 IIC_SSE_MOVD_ToGP>, EVEX, VEX_LIG;
2189 def VMOVSS2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
2190 (ins i32mem:$dst, FR32X:$src),
2191 "vmovd\t{$src, $dst|$dst, $src}",
2192 [(store (i32 (bitconvert FR32X:$src)), addr:$dst)],
2193 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2194 }
2196 // Move Quadword Int to Packed Quadword Int
2198 def VMOVQI2PQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
2199 (ins i64mem:$src),
2200 "vmovq\t{$src, $dst|$dst, $src}",
2201 [(set VR128X:$dst,
2202 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
2203 EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
2205 //===----------------------------------------------------------------------===//
2206 // AVX-512 MOVSS, MOVSD
2207 //===----------------------------------------------------------------------===//
2209 multiclass avx512_move_scalar <string asm, RegisterClass RC,
2210 SDNode OpNode, ValueType vt,
2211 X86MemOperand x86memop, PatFrag mem_pat> {
2212 let hasSideEffects = 0 in {
2213 def rr : SI<0x10, MRMSrcReg, (outs VR128X:$dst), (ins VR128X:$src1, RC:$src2),
2214 !strconcat(asm, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2215 [(set VR128X:$dst, (vt (OpNode VR128X:$src1,
2216 (scalar_to_vector RC:$src2))))],
2217 IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG;
2218 let Constraints = "$src1 = $dst" in
2219 def rrk : SI<0x10, MRMSrcReg, (outs VR128X:$dst),
2220 (ins VR128X:$src1, VK1WM:$mask, RC:$src2, RC:$src3),
2221 !strconcat(asm,
2222 " \t{$src3, $src2, $dst {${mask}}|$dst {${mask}}, $src2, $src3}"),
2223 [], IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG, EVEX_K;
2224 def rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
2225 !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
2226 [(set RC:$dst, (mem_pat addr:$src))], IIC_SSE_MOV_S_RM>,
2227 EVEX, VEX_LIG;
2228 let mayStore = 1 in {
2229 def mr: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
2230 !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
2231 [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
2232 EVEX, VEX_LIG;
2233 def mrk: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, VK1WM:$mask, RC:$src),
2234 !strconcat(asm, " \t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
2235 [], IIC_SSE_MOV_S_MR>,
2236 EVEX, VEX_LIG, EVEX_K;
2237 } // mayStore = 1
2238 } //hasSideEffects = 0
2239 }
2241 let ExeDomain = SSEPackedSingle in
2242 defm VMOVSSZ : avx512_move_scalar<"movss", FR32X, X86Movss, v4f32, f32mem,
2243 loadf32>, XS, EVEX_CD8<32, CD8VT1>;
2245 let ExeDomain = SSEPackedDouble in
2246 defm VMOVSDZ : avx512_move_scalar<"movsd", FR64X, X86Movsd, v2f64, f64mem,
2247 loadf64>, XD, VEX_W, EVEX_CD8<64, CD8VT1>;
2249 def : Pat<(f32 (X86select VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
2250 (COPY_TO_REGCLASS (VMOVSSZrrk (COPY_TO_REGCLASS FR32X:$src2, VR128X),
2251 VK1WM:$mask, (f32 (IMPLICIT_DEF)), FR32X:$src1), FR32X)>;
2253 def : Pat<(f64 (X86select VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
2254 (COPY_TO_REGCLASS (VMOVSDZrrk (COPY_TO_REGCLASS FR64X:$src2, VR128X),
2255 VK1WM:$mask, (f64 (IMPLICIT_DEF)), FR64X:$src1), FR64X)>;
2257 def : Pat<(int_x86_avx512_mask_store_ss addr:$dst, VR128X:$src, GR8:$mask),
2258 (VMOVSSZmrk addr:$dst, (i1 (COPY_TO_REGCLASS GR8:$mask, VK1WM)),
2259 (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
2261 // For the disassembler
2262 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
2263 def VMOVSSZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
2264 (ins VR128X:$src1, FR32X:$src2),
2265 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
2266 IIC_SSE_MOV_S_RR>,
2267 XS, EVEX_4V, VEX_LIG;
2268 def VMOVSDZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
2269 (ins VR128X:$src1, FR64X:$src2),
2270 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
2271 IIC_SSE_MOV_S_RR>,
2272 XD, EVEX_4V, VEX_LIG, VEX_W;
2273 }
2275 let Predicates = [HasAVX512] in {
2276 let AddedComplexity = 15 in {
2277 // Move scalar to XMM zero-extended, zeroing a VR128X then do a
2278 // MOVS{S,D} to the lower bits.
2279 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32X:$src)))),
2280 (VMOVSSZrr (v4f32 (V_SET0)), FR32X:$src)>;
2281 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128X:$src))),
2282 (VMOVSSZrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
2283 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128X:$src))),
2284 (VMOVSSZrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
2285 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64X:$src)))),
2286 (VMOVSDZrr (v2f64 (V_SET0)), FR64X:$src)>;
2288 // Move low f32 and clear high bits.
2289 def : Pat<(v8f32 (X86vzmovl (v8f32 VR256X:$src))),
2290 (SUBREG_TO_REG (i32 0),
2291 (VMOVSSZrr (v4f32 (V_SET0)),
2292 (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)), sub_xmm)>;
2293 def : Pat<(v8i32 (X86vzmovl (v8i32 VR256X:$src))),
2294 (SUBREG_TO_REG (i32 0),
2295 (VMOVSSZrr (v4i32 (V_SET0)),
2296 (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)), sub_xmm)>;
2297 }
2299 let AddedComplexity = 20 in {
2300 // MOVSSrm zeros the high parts of the register; represent this
2301 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
2302 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
2303 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
2304 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
2305 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
2306 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
2307 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
2309 // MOVSDrm zeros the high parts of the register; represent this
2310 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
2311 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
2312 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
2313 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
2314 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
2315 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
2316 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
2317 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
2318 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
2319 def : Pat<(v2f64 (X86vzload addr:$src)),
2320 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
2322 // Represent the same patterns above but in the form they appear for
2323 // 256-bit types
2324 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
2325 (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
2326 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
2327 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
2328 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
2329 (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
2330 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
2331 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
2332 (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
2334 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
2335 (v4f32 (scalar_to_vector FR32X:$src)), (iPTR 0)))),
2336 (SUBREG_TO_REG (i32 0), (v4f32 (VMOVSSZrr (v4f32 (V_SET0)),
2337 FR32X:$src)), sub_xmm)>;
2338 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
2339 (v2f64 (scalar_to_vector FR64X:$src)), (iPTR 0)))),
2340 (SUBREG_TO_REG (i64 0), (v2f64 (VMOVSDZrr (v2f64 (V_SET0)),
2341 FR64X:$src)), sub_xmm)>;
2342 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
2343 (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
2344 (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;
2346 // Move low f64 and clear high bits.
2347 def : Pat<(v4f64 (X86vzmovl (v4f64 VR256X:$src))),
2348 (SUBREG_TO_REG (i32 0),
2349 (VMOVSDZrr (v2f64 (V_SET0)),
2350 (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)), sub_xmm)>;
2352 def : Pat<(v4i64 (X86vzmovl (v4i64 VR256X:$src))),
2353 (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (V_SET0)),
2354 (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)), sub_xmm)>;
2355 }
2356 // Extract and store.
2357 def : Pat<(store (f32 (vector_extract (v4f32 VR128X:$src), (iPTR 0))),
2358 addr:$dst),
2359 (VMOVSSZmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128X:$src), FR32X))>;
2360 def : Pat<(store (f64 (vector_extract (v2f64 VR128X:$src), (iPTR 0))),
2361 addr:$dst),
2362 (VMOVSDZmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128X:$src), FR64X))>;
2364 // Shuffle with VMOVSS
2365 def : Pat<(v4i32 (X86Movss VR128X:$src1, VR128X:$src2)),
2366 (VMOVSSZrr (v4i32 VR128X:$src1),
2367 (COPY_TO_REGCLASS (v4i32 VR128X:$src2), FR32X))>;
2368 def : Pat<(v4f32 (X86Movss VR128X:$src1, VR128X:$src2)),
2369 (VMOVSSZrr (v4f32 VR128X:$src1),
2370 (COPY_TO_REGCLASS (v4f32 VR128X:$src2), FR32X))>;
2372 // 256-bit variants
2373 def : Pat<(v8i32 (X86Movss VR256X:$src1, VR256X:$src2)),
2374 (SUBREG_TO_REG (i32 0),
2375 (VMOVSSZrr (EXTRACT_SUBREG (v8i32 VR256X:$src1), sub_xmm),
2376 (EXTRACT_SUBREG (v8i32 VR256X:$src2), sub_xmm)),
2377 sub_xmm)>;
2378 def : Pat<(v8f32 (X86Movss VR256X:$src1, VR256X:$src2)),
2379 (SUBREG_TO_REG (i32 0),
2380 (VMOVSSZrr (EXTRACT_SUBREG (v8f32 VR256X:$src1), sub_xmm),
2381 (EXTRACT_SUBREG (v8f32 VR256X:$src2), sub_xmm)),
2382 sub_xmm)>;
2384 // Shuffle with VMOVSD
2385 def : Pat<(v2i64 (X86Movsd VR128X:$src1, VR128X:$src2)),
2386 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2387 def : Pat<(v2f64 (X86Movsd VR128X:$src1, VR128X:$src2)),
2388 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2389 def : Pat<(v4f32 (X86Movsd VR128X:$src1, VR128X:$src2)),
2390 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2391 def : Pat<(v4i32 (X86Movsd VR128X:$src1, VR128X:$src2)),
2392 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2394 // 256-bit variants
2395 def : Pat<(v4i64 (X86Movsd VR256X:$src1, VR256X:$src2)),
2396 (SUBREG_TO_REG (i32 0),
2397 (VMOVSDZrr (EXTRACT_SUBREG (v4i64 VR256X:$src1), sub_xmm),
2398 (EXTRACT_SUBREG (v4i64 VR256X:$src2), sub_xmm)),
2399 sub_xmm)>;
2400 def : Pat<(v4f64 (X86Movsd VR256X:$src1, VR256X:$src2)),
2401 (SUBREG_TO_REG (i32 0),
2402 (VMOVSDZrr (EXTRACT_SUBREG (v4f64 VR256X:$src1), sub_xmm),
2403 (EXTRACT_SUBREG (v4f64 VR256X:$src2), sub_xmm)),
2404 sub_xmm)>;
2406 def : Pat<(v2f64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
2407 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2408 def : Pat<(v2i64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
2409 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2410 def : Pat<(v4f32 (X86Movlps VR128X:$src1, VR128X:$src2)),
2411 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2412 def : Pat<(v4i32 (X86Movlps VR128X:$src1, VR128X:$src2)),
2413 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2414 }
2416 let AddedComplexity = 15 in
2417 def VMOVZPQILo2PQIZrr : AVX512XSI<0x7E, MRMSrcReg, (outs VR128X:$dst),
2418 (ins VR128X:$src),
2419 "vmovq\t{$src, $dst|$dst, $src}",
2420 [(set VR128X:$dst, (v2i64 (X86vzmovl
2421 (v2i64 VR128X:$src))))],
2422 IIC_SSE_MOVQ_RR>, EVEX, VEX_W;
2424 let AddedComplexity = 20 in
2425 def VMOVZPQILo2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
2426 (ins i128mem:$src),
2427 "vmovq\t{$src, $dst|$dst, $src}",
2428 [(set VR128X:$dst, (v2i64 (X86vzmovl
2429 (loadv2i64 addr:$src))))],
2430 IIC_SSE_MOVDQ>, EVEX, VEX_W,
2431 EVEX_CD8<8, CD8VT8>;
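// vmovq between xmm registers copies the low quadword and zeroes bits
// [127:64] of the destination, which is why the X86vzmovl (move-and-zero)
// node maps directly onto VMOVZPQILo2PQIZ{rr,rm}.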
2433 let Predicates = [HasAVX512] in {
2434 // AVX 128-bit movd/movq instruction write zeros in the high 128-bit part.
2435 let AddedComplexity = 20 in {
2436 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
2437 (VMOVDI2PDIZrm addr:$src)>;
2438 def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
2439 (VMOV64toPQIZrr GR64:$src)>;
2440 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
2441 (VMOVDI2PDIZrr GR32:$src)>;
2443 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
2444 (VMOVDI2PDIZrm addr:$src)>;
2445 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
2446 (VMOVDI2PDIZrm addr:$src)>;
2447 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
2448 (VMOVZPQILo2PQIZrm addr:$src)>;
2449 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128X:$src))),
2450 (VMOVZPQILo2PQIZrr VR128X:$src)>;
2451 def : Pat<(v2i64 (X86vzload addr:$src)),
2452 (VMOVZPQILo2PQIZrm addr:$src)>;
2453 }
2455 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
2456 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
2457 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
2458 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>;
2459 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
2460 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
2461 (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
2462 }
2464 def : Pat<(v16i32 (X86Vinsert (v16i32 immAllZerosV), GR32:$src2, (iPTR 0))),
2465 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;
2467 def : Pat<(v8i64 (X86Vinsert (bc_v8i64 (v16i32 immAllZerosV)), GR64:$src2, (iPTR 0))),
2468 (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
2470 def : Pat<(v16i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
2471 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;
2473 def : Pat<(v8i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
2474 (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
2476 //===----------------------------------------------------------------------===//
2477 // AVX-512 - Non-temporals
2478 //===----------------------------------------------------------------------===//
2479 let SchedRW = [WriteLoad] in {
2480 def VMOVNTDQAZrm : AVX512PI<0x2A, MRMSrcMem, (outs VR512:$dst),
2481 (ins i512mem:$src), "vmovntdqa\t{$src, $dst|$dst, $src}",
2482 [(set VR512:$dst, (int_x86_avx512_movntdqa addr:$src))],
2483 SSEPackedInt>, EVEX, T8PD, EVEX_V512,
2484 EVEX_CD8<64, CD8VF>;
2486 let Predicates = [HasAVX512, HasVLX] in {
2487 def VMOVNTDQAZ256rm : AVX512PI<0x2A, MRMSrcMem, (outs VR256X:$dst),
2488 (ins i256mem:$src),
2489 "vmovntdqa\t{$src, $dst|$dst, $src}", [],
2490 SSEPackedInt>, EVEX, T8PD, EVEX_V256,
2491 EVEX_CD8<64, CD8VF>;
2493 def VMOVNTDQAZ128rm : AVX512PI<0x2A, MRMSrcMem, (outs VR128X:$dst),
2494 (ins i128mem:$src),
2495 "vmovntdqa\t{$src, $dst|$dst, $src}", [],
2496 SSEPackedInt>, EVEX, T8PD, EVEX_V128,
2497 EVEX_CD8<64, CD8VF>;
2498 }
2499 }
2501 multiclass avx512_movnt<bits<8> opc, string OpcodeStr, PatFrag st_frag,
2502 ValueType OpVT, RegisterClass RC, X86MemOperand memop,
2503 Domain d, InstrItinClass itin = IIC_SSE_MOVNT> {
2504 let SchedRW = [WriteStore], mayStore = 1,
2505 AddedComplexity = 400 in
2506 def mr : AVX512PI<opc, MRMDestMem, (outs), (ins memop:$dst, RC:$src),
2507 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2508 [(st_frag (OpVT RC:$src), addr:$dst)], d, itin>, EVEX;
2509 }
2511 multiclass avx512_movnt_vl<bits<8> opc, string OpcodeStr, PatFrag st_frag,
2512 string elty, string elsz, string vsz512,
2513 string vsz256, string vsz128, Domain d,
2514 Predicate prd, InstrItinClass itin = IIC_SSE_MOVNT> {
2515 let Predicates = [prd] in
2516 defm Z : avx512_movnt<opc, OpcodeStr, st_frag,
2517 !cast<ValueType>("v"##vsz512##elty##elsz), VR512,
2518 !cast<X86MemOperand>(elty##"512mem"), d, itin>,
2519 EVEX_V512;
2521 let Predicates = [prd, HasVLX] in {
2522 defm Z256 : avx512_movnt<opc, OpcodeStr, st_frag,
2523 !cast<ValueType>("v"##vsz256##elty##elsz), VR256X,
2524 !cast<X86MemOperand>(elty##"256mem"), d, itin>,
2525 EVEX_V256;
2527 defm Z128 : avx512_movnt<opc, OpcodeStr, st_frag,
2528 !cast<ValueType>("v"##vsz128##elty##elsz), VR128X,
2529 !cast<X86MemOperand>(elty##"128mem"), d, itin>,
2530 EVEX_V128;
2531 }
2532 }
2534 defm VMOVNTDQ : avx512_movnt_vl<0xE7, "vmovntdq", alignednontemporalstore,
2535 "i", "64", "8", "4", "2", SSEPackedInt,
2536 HasAVX512>, PD, EVEX_CD8<64, CD8VF>;
2538 defm VMOVNTPD : avx512_movnt_vl<0x2B, "vmovntpd", alignednontemporalstore,
2539 "f", "64", "8", "4", "2", SSEPackedDouble,
2540 HasAVX512>, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2542 defm VMOVNTPS : avx512_movnt_vl<0x2B, "vmovntps", alignednontemporalstore,
2543 "f", "32", "16", "8", "4", SSEPackedSingle,
2544 HasAVX512>, PS, EVEX_CD8<32, CD8VF>;
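// These match only alignednontemporalstore, i.e. explicitly aligned
// non-temporal stores; an unaligned or ordinary store falls back to the
// regular vmovups/vmovdqu patterns above.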
2546 //===----------------------------------------------------------------------===//
2547 // AVX-512 - Integer arithmetic
2548 //===----------------------------------------------------------------------===//
2549 multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2550 X86VectorVTInfo _, OpndItins itins,
2551 bit IsCommutable = 0> {
2552 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
2553 (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
2554 "$src2, $src1", "$src1, $src2",
2555 (_.VT (OpNode _.RC:$src1, _.RC:$src2)),
2556 itins.rr, IsCommutable>,
2557 AVX512BIBase, EVEX_4V;
2559 let mayLoad = 1 in
2560 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
2561 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
2562 "$src2, $src1", "$src1, $src2",
2563 (_.VT (OpNode _.RC:$src1,
2564 (bitconvert (_.LdFrag addr:$src2)))),
2565 itins.rm>,
2566 AVX512BIBase, EVEX_4V;
2567 }
2569 multiclass avx512_binop_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
2570 X86VectorVTInfo _, OpndItins itins,
2571 bit IsCommutable = 0> :
2572 avx512_binop_rm<opc, OpcodeStr, OpNode, _, itins, IsCommutable> {
2573 let mayLoad = 1 in
2574 defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
2575 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
2576 "${src2}"##_.BroadcastStr##", $src1",
2577 "$src1, ${src2}"##_.BroadcastStr,
2578 (_.VT (OpNode _.RC:$src1,
2579 (X86VBroadcast
2580 (_.ScalarLdFrag addr:$src2)))),
2581 itins.rm>,
2582 AVX512BIBase, EVEX_4V, EVEX_B;
2583 }
2585 multiclass avx512_binop_rm_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
2586 AVX512VLVectorVTInfo VTInfo, OpndItins itins,
2587 Predicate prd, bit IsCommutable = 0> {
2588 let Predicates = [prd] in
2589 defm Z : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info512, itins,
2590 IsCommutable>, EVEX_V512;
2592 let Predicates = [prd, HasVLX] in {
2593 defm Z256 : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info256, itins,
2594 IsCommutable>, EVEX_V256;
2595 defm Z128 : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info128, itins,
2596 IsCommutable>, EVEX_V128;
2597 }
2598 }
2600 multiclass avx512_binop_rmb_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
2601 AVX512VLVectorVTInfo VTInfo, OpndItins itins,
2602 Predicate prd, bit IsCommutable = 0> {
2603 let Predicates = [prd] in
2604 defm Z : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info512, itins,
2605 IsCommutable>, EVEX_V512;
2607 let Predicates = [prd, HasVLX] in {
2608 defm Z256 : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info256, itins,
2609 IsCommutable>, EVEX_V256;
2610 defm Z128 : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info128, itins,
2611 IsCommutable>, EVEX_V128;
2612 }
2613 }
2615 multiclass avx512_binop_rm_vl_q<bits<8> opc, string OpcodeStr, SDNode OpNode,
2616 OpndItins itins, Predicate prd,
2617 bit IsCommutable = 0> {
2618 defm NAME : avx512_binop_rmb_vl<opc, OpcodeStr, OpNode, avx512vl_i64_info,
2619 itins, prd, IsCommutable>,
2620 VEX_W, EVEX_CD8<64, CD8VF>;
2621 }
2623 multiclass avx512_binop_rm_vl_d<bits<8> opc, string OpcodeStr, SDNode OpNode,
2624 OpndItins itins, Predicate prd,
2625 bit IsCommutable = 0> {
2626 defm NAME : avx512_binop_rmb_vl<opc, OpcodeStr, OpNode, avx512vl_i32_info,
2627 itins, prd, IsCommutable>, EVEX_CD8<32, CD8VF>;
2628 }
2630 multiclass avx512_binop_rm_vl_w<bits<8> opc, string OpcodeStr, SDNode OpNode,
2631 OpndItins itins, Predicate prd,
2632 bit IsCommutable = 0> {
2633 defm NAME : avx512_binop_rm_vl<opc, OpcodeStr, OpNode, avx512vl_i16_info,
2634 itins, prd, IsCommutable>, EVEX_CD8<16, CD8VF>;
2635 }
2637 multiclass avx512_binop_rm_vl_b<bits<8> opc, string OpcodeStr, SDNode OpNode,
2638 OpndItins itins, Predicate prd,
2639 bit IsCommutable = 0> {
2640 defm NAME : avx512_binop_rm_vl<opc, OpcodeStr, OpNode, avx512vl_i8_info,
2641 itins, prd, IsCommutable>, EVEX_CD8<8, CD8VF>;
2642 }
2644 multiclass avx512_binop_rm_vl_dq<bits<8> opc_d, bits<8> opc_q, string OpcodeStr,
2645 SDNode OpNode, OpndItins itins, Predicate prd,
2646 bit IsCommutable = 0> {
2647 defm Q : avx512_binop_rm_vl_q<opc_q, OpcodeStr, OpNode, itins, prd,
2648 IsCommutable>;
2650 defm D : avx512_binop_rm_vl_d<opc_d, OpcodeStr, OpNode, itins, prd,
2651 IsCommutable>;
2652 }
2654 multiclass avx512_binop_rm_vl_bw<bits<8> opc_b, bits<8> opc_w, string OpcodeStr,
2655 SDNode OpNode, OpndItins itins, Predicate prd,
2656 bit IsCommutable = 0> {
2657 defm W : avx512_binop_rm_vl_w<opc_w, OpcodeStr, OpNode, itins, prd,
2658 IsCommutable>;
2660 defm B : avx512_binop_rm_vl_b<opc_b, OpcodeStr, OpNode, itins, prd,
2661 IsCommutable>;
2662 }
2664 multiclass avx512_binop_rm_vl_all<bits<8> opc_b, bits<8> opc_w,
2665 bits<8> opc_d, bits<8> opc_q,
2666 string OpcodeStr, SDNode OpNode,
2667 OpndItins itins, bit IsCommutable = 0> {
2668 defm NAME : avx512_binop_rm_vl_dq<opc_d, opc_q, OpcodeStr, OpNode,
2669 itins, HasAVX512, IsCommutable>,
2670 avx512_binop_rm_vl_bw<opc_b, opc_w, OpcodeStr, OpNode,
2671 itins, HasBWI, IsCommutable>;
2672 }
2674 multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, ValueType DstVT,
2675 ValueType SrcVT, RegisterClass KRC, RegisterClass RC,
2676 PatFrag memop_frag, X86MemOperand x86memop,
2677 PatFrag scalar_mfrag, X86MemOperand x86scalar_mop,
2678 string BrdcstStr, OpndItins itins, bit IsCommutable = 0> {
2679 let isCommutable = IsCommutable in
2680 {
2681 def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2682 (ins RC:$src1, RC:$src2),
2683 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2684 [], itins.rr>, EVEX_4V;
2685 def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2686 (ins KRC:$mask, RC:$src1, RC:$src2),
2687 !strconcat(OpcodeStr,
2688 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2689 [], itins.rr>, EVEX_4V, EVEX_K;
2690 def rrkz : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2691 (ins KRC:$mask, RC:$src1, RC:$src2),
2692 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst {${mask}} {z}" ,
2693 "|$dst {${mask}} {z}, $src1, $src2}"),
2694 [], itins.rr>, EVEX_4V, EVEX_KZ;
2695 }
2696 let mayLoad = 1 in {
2697 def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2698 (ins RC:$src1, x86memop:$src2),
2699 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2700 [], itins.rm>, EVEX_4V;
2701 def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2702 (ins KRC:$mask, RC:$src1, x86memop:$src2),
2703 !strconcat(OpcodeStr,
2704 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2705 [], itins.rm>, EVEX_4V, EVEX_K;
2706 def rmkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2707 (ins KRC:$mask, RC:$src1, x86memop:$src2),
2708 !strconcat(OpcodeStr,
2709 " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
2710 [], itins.rm>, EVEX_4V, EVEX_KZ;
2711 def rmb : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2712 (ins RC:$src1, x86scalar_mop:$src2),
2713 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2714 ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
2715 [], itins.rm>, EVEX_4V, EVEX_B;
2716 def rmbk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2717 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
2718 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2719 ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}",
2720 BrdcstStr, "}"),
2721 [], itins.rm>, EVEX_4V, EVEX_B, EVEX_K;
2722 def rmbkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2723 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
2724 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2725 ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
2726 BrdcstStr, "}"),
2727 [], itins.rm>, EVEX_4V, EVEX_B, EVEX_KZ;
2728 }
2729 }
2731 defm VPADD : avx512_binop_rm_vl_all<0xFC, 0xFD, 0xFE, 0xD4, "vpadd", add,
2732 SSE_INTALU_ITINS_P, 1>;
2733 defm VPSUB : avx512_binop_rm_vl_all<0xF8, 0xF9, 0xFA, 0xFB, "vpsub", sub,
2734 SSE_INTALU_ITINS_P, 0>;
2735 defm VPMULLD : avx512_binop_rm_vl_d<0x40, "vpmull", mul,
2736 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
2737 defm VPMULLW : avx512_binop_rm_vl_w<0xD5, "vpmull", mul,
2738 SSE_INTALU_ITINS_P, HasBWI, 1>;
2739 defm VPMULLQ : avx512_binop_rm_vl_q<0x40, "vpmull", mul,
2740 SSE_INTALU_ITINS_P, HasDQI, 1>, T8PD;
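// vpmulld and vpmullq share opcode 0x40; they are disambiguated by element
// size (EVEX_CD8<32/64>, VEX_W) and by predicate, since the 64-bit vpmullq
// form requires AVX512DQ (HasDQI) while vpmullw only needs AVX512BW.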
2742 defm VPMULDQZ : avx512_binop_rm2<0x28, "vpmuldq", v8i64, v16i32, VK8WM, VR512,
2743 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2744 SSE_INTALU_ITINS_P, 1>, T8PD, EVEX_V512,
2745 EVEX_CD8<64, CD8VF>, VEX_W;
2747 defm VPMULUDQZ : avx512_binop_rm2<0xF4, "vpmuludq", v8i64, v16i32, VK8WM, VR512,
2748 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2749 SSE_INTMUL_ITINS_P, 1>, EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
2751 def : Pat<(v8i64 (X86pmuludq (v16i32 VR512:$src1), (v16i32 VR512:$src2))),
2752 (VPMULUDQZrr VR512:$src1, VR512:$src2)>;
2754 def : Pat<(v8i64 (int_x86_avx512_mask_pmulu_dq_512 (v16i32 VR512:$src1),
2755 (v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2756 (VPMULUDQZrr VR512:$src1, VR512:$src2)>;
2757 def : Pat<(v8i64 (int_x86_avx512_mask_pmul_dq_512 (v16i32 VR512:$src1),
2758 (v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2759 (VPMULDQZrr VR512:$src1, VR512:$src2)>;
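// vpmuludq/vpmuldq read the even-numbered 32-bit lanes of each source and
// produce full 64-bit products, hence the v16i32 sources and v8i64 result
// in the patterns above.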
2761 defm VPMAXSB : avx512_binop_rm_vl_b<0x3C, "vpmaxs", X86smax,
2762 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
2763 defm VPMAXSW : avx512_binop_rm_vl_w<0xEE, "vpmaxs", X86smax,
2764 SSE_INTALU_ITINS_P, HasBWI, 1>;
2765 defm VPMAXS : avx512_binop_rm_vl_dq<0x3D, 0x3D, "vpmaxs", X86smax,
2766 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
2768 defm VPMAXUB : avx512_binop_rm_vl_b<0xDE, "vpmaxu", X86umax,
2769 SSE_INTALU_ITINS_P, HasBWI, 1>;
2770 defm VPMAXUW : avx512_binop_rm_vl_w<0x3E, "vpmaxu", X86umax,
2771 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
2772 defm VPMAXU : avx512_binop_rm_vl_dq<0x3F, 0x3F, "vpmaxu", X86umax,
2773 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
2775 defm VPMINSB : avx512_binop_rm_vl_b<0x38, "vpmins", X86smin,
2776 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
2777 defm VPMINSW : avx512_binop_rm_vl_w<0xEA, "vpmins", X86smin,
2778 SSE_INTALU_ITINS_P, HasBWI, 1>;
2779 defm VPMINS : avx512_binop_rm_vl_dq<0x39, 0x39, "vpmins", X86smin,
2780 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
2782 defm VPMINUB : avx512_binop_rm_vl_b<0xDA, "vpminu", X86umin,
2783 SSE_INTALU_ITINS_P, HasBWI, 1>;
2784 defm VPMINUW : avx512_binop_rm_vl_w<0x3A, "vpminu", X86umin,
2785 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
2786 defm VPMINU : avx512_binop_rm_vl_dq<0x3B, 0x3B, "vpminu", X86umin,
2787 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
def : Pat <(v16i32 (int_x86_avx512_mask_pmaxs_d_512 (v16i32 VR512:$src1),
                    (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
           (VPMAXSDZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v16i32 (int_x86_avx512_mask_pmaxu_d_512 (v16i32 VR512:$src1),
                    (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
           (VPMAXUDZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v8i64 (int_x86_avx512_mask_pmaxs_q_512 (v8i64 VR512:$src1),
                   (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
           (VPMAXSQZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v8i64 (int_x86_avx512_mask_pmaxu_q_512 (v8i64 VR512:$src1),
                   (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
           (VPMAXUQZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v16i32 (int_x86_avx512_mask_pmins_d_512 (v16i32 VR512:$src1),
                    (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
           (VPMINSDZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v16i32 (int_x86_avx512_mask_pminu_d_512 (v16i32 VR512:$src1),
                    (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
           (VPMINUDZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v8i64 (int_x86_avx512_mask_pmins_q_512 (v8i64 VR512:$src1),
                   (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
           (VPMINSQZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v8i64 (int_x86_avx512_mask_pminu_q_512 (v8i64 VR512:$src1),
                   (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
           (VPMINUQZrr VR512:$src1, VR512:$src2)>;
//===----------------------------------------------------------------------===//
// AVX-512 - Unpack Instructions
//===----------------------------------------------------------------------===//
multiclass avx512_unpack_fp<bits<8> opc, SDNode OpNode, ValueType vt,
                                   PatFrag mem_frag, RegisterClass RC,
                                   X86MemOperand x86memop, string asm,
                                   Domain d> {
    def rr : AVX512PI<opc, MRMSrcReg,
                (outs RC:$dst), (ins RC:$src1, RC:$src2),
                asm, [(set RC:$dst,
                           (vt (OpNode RC:$src1, RC:$src2)))],
                           d>, EVEX_4V;
    def rm : AVX512PI<opc, MRMSrcMem,
                (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
                asm, [(set RC:$dst,
                       (vt (OpNode RC:$src1,
                            (bitconvert (mem_frag addr:$src2)))))],
                            d>, EVEX_4V;
}
defm VUNPCKHPSZ: avx512_unpack_fp<0x15, X86Unpckh, v16f32, memopv8f64,
      VR512, f512mem, "vunpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VUNPCKHPDZ: avx512_unpack_fp<0x15, X86Unpckh, v8f64, memopv8f64,
      VR512, f512mem, "vunpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
defm VUNPCKLPSZ: avx512_unpack_fp<0x14, X86Unpckl, v16f32, memopv8f64,
      VR512, f512mem, "vunpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VUNPCKLPDZ: avx512_unpack_fp<0x14, X86Unpckl, v8f64, memopv8f64,
      VR512, f512mem, "vunpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
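// Illustrative note (not in the original source): unpack operates per 128-bit
// lane, e.g. VUNPCKHPSZ produces { a2, b2, a3, b3 } within each lane. The PS
// variants reuse memopv8f64 for the memory operand; the pattern bitconverts
// the loaded value to the f32 vector type.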
multiclass avx512_unpack_int<bits<8> opc, string OpcodeStr, SDNode OpNode,
                             ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                             X86MemOperand x86memop> {
  def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
             IIC_SSE_UNPCK>, EVEX_4V;
  def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop:$src2),
             !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1),
                                  (bitconvert (memop_frag addr:$src2)))))],
             IIC_SSE_UNPCK>, EVEX_4V;
}
defm VPUNPCKLDQZ : avx512_unpack_int<0x62, "vpunpckldq", X86Unpckl, v16i32,
                                     VR512, memopv16i32, i512mem>, EVEX_V512,
                                     EVEX_CD8<32, CD8VF>;
defm VPUNPCKLQDQZ : avx512_unpack_int<0x6C, "vpunpcklqdq", X86Unpckl, v8i64,
                                      VR512, memopv8i64, i512mem>, EVEX_V512,
                                      VEX_W, EVEX_CD8<64, CD8VF>;
defm VPUNPCKHDQZ : avx512_unpack_int<0x6A, "vpunpckhdq", X86Unpckh, v16i32,
                                     VR512, memopv16i32, i512mem>, EVEX_V512,
                                     EVEX_CD8<32, CD8VF>;
defm VPUNPCKHQDQZ : avx512_unpack_int<0x6D, "vpunpckhqdq", X86Unpckh, v8i64,
                                      VR512, memopv8i64, i512mem>, EVEX_V512,
                                      VEX_W, EVEX_CD8<64, CD8VF>;
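// EVEX_CD8<32, CD8VF> and EVEX_CD8<64, CD8VF> select full-vector compressed
// disp8 scaling, so 8-bit displacements in the memory forms are encoded in
// units of 64 bytes.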
//===----------------------------------------------------------------------===//
// AVX-512 - PSHUFD
//===----------------------------------------------------------------------===//
multiclass avx512_pshuf_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
                      SDNode OpNode, PatFrag mem_frag,
                      X86MemOperand x86memop, ValueType OpVT> {
  def ri : AVX512Ii8<opc, MRMSrcReg, (outs RC:$dst),
                     (ins RC:$src1, i8imm:$src2),
                     !strconcat(OpcodeStr,
                         " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set RC:$dst,
                       (OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
                     EVEX;
  def mi : AVX512Ii8<opc, MRMSrcMem, (outs RC:$dst),
                     (ins x86memop:$src1, i8imm:$src2),
                     !strconcat(OpcodeStr,
                         " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set RC:$dst,
                       (OpVT (OpNode (mem_frag addr:$src1),
                              (i8 imm:$src2))))]>, EVEX;
}
defm VPSHUFDZ : avx512_pshuf_imm<0x70, "vpshufd", VR512, X86PShufd, memopv16i32,
                                 i512mem, v16i32>, PD, EVEX_V512, EVEX_CD8<32, CD8VF>;

let ExeDomain = SSEPackedSingle in
defm VPERMILPSZ : avx512_pshuf_imm<0x04, "vpermilps", VR512, X86VPermilpi,
                      memopv16f32, f512mem, v16f32>, TAPD, EVEX_V512,
                      EVEX_CD8<32, CD8VF>;
let ExeDomain = SSEPackedDouble in
defm VPERMILPDZ : avx512_pshuf_imm<0x05, "vpermilpd", VR512, X86VPermilpi,
                      memopv8f64, f512mem, v8f64>, TAPD, EVEX_V512,
                      VEX_W, EVEX_CD8<64, CD8VF>;

def : Pat<(v16i32 (X86VPermilpi VR512:$src1, (i8 imm:$imm))),
          (VPERMILPSZri VR512:$src1, imm:$imm)>;
def : Pat<(v8i64 (X86VPermilpi VR512:$src1, (i8 imm:$imm))),
          (VPERMILPDZri VR512:$src1, imm:$imm)>;
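// There is no integer flavor of the immediate VPERMILP; the v16i32/v8i64
// X86VPermilpi nodes above are selected to the FP VPERMILPS/VPERMILPD
// instructions, crossing the execution domain.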
//===----------------------------------------------------------------------===//
// AVX-512 Logical Instructions
//===----------------------------------------------------------------------===//

defm VPAND : avx512_binop_rm_vl_dq<0xDB, 0xDB, "vpand", and,
                                   SSE_INTALU_ITINS_P, HasAVX512, 1>;
defm VPOR : avx512_binop_rm_vl_dq<0xEB, 0xEB, "vpor", or,
                                  SSE_INTALU_ITINS_P, HasAVX512, 1>;
defm VPXOR : avx512_binop_rm_vl_dq<0xEF, 0xEF, "vpxor", xor,
                                   SSE_INTALU_ITINS_P, HasAVX512, 1>;
defm VPANDN : avx512_binop_rm_vl_dq<0xDF, 0xDF, "vpandn", X86andnp,
                                    SSE_INTALU_ITINS_P, HasAVX512, 1>;
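// The _vl_dq multiclasses (defined earlier in this file) expand each defm into
// d and q element-size variants, e.g. VPANDD/VPANDQ, across the vector-length
// hierarchy.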
//===----------------------------------------------------------------------===//
// AVX-512 FP arithmetic
//===----------------------------------------------------------------------===//
multiclass avx512_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
                            SizeItins itins> {
  defm SSZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"), OpNode, FR32X,
                             f32mem, itins.s, 0>, XS, EVEX_4V, VEX_LIG,
                             EVEX_CD8<32, CD8VT1>;
  defm SDZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"), OpNode, FR64X,
                             f64mem, itins.d, 0>, XD, VEX_W, EVEX_4V, VEX_LIG,
                             EVEX_CD8<64, CD8VT1>;
}
let isCommutable = 1 in {
defm VADD : avx512_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>;
defm VMUL : avx512_binop_s<0x59, "mul", fmul, SSE_ALU_ITINS_S>;
defm VMIN : avx512_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>;
defm VMAX : avx512_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>;
}
let isCommutable = 0 in {
defm VSUB : avx512_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>;
defm VDIV : avx512_binop_s<0x5E, "div", fdiv, SSE_ALU_ITINS_S>;
}
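// Each defm above expands to SS/SD scalar forms (e.g. VADDSSZ/VADDSDZ) on the
// extended FR32X/FR64X register classes; EVEX_CD8<..., CD8VT1> scales the
// compressed disp8 by a single element (4 or 8 bytes).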
multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                            RegisterClass KRC,
                            RegisterClass RC, ValueType vt,
                            X86MemOperand x86memop, PatFrag mem_frag,
                            X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
                            string BrdcstStr,
                            Domain d, OpndItins itins, bit commutable> {
  let isCommutable = commutable in {
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>,
       EVEX_4V;

    def rrk: PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src1, RC:$src2),
       !strconcat(OpcodeStr,
           " \t{$src2, $src1, $dst {${mask}} |$dst {${mask}}, $src1, $src2}"),
       [], itins.rr, d>, EVEX_4V, EVEX_K;

    def rrkz: PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src1, RC:$src2),