1 //===-- X86InstrAVX512.td - AVX512 Instruction Set ---------*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 AVX512 instruction set, defining the
11 // instructions, and properties of the instructions which are needed for code
12 // generation, machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
16 // Group template arguments that can be derived from the vector type (EltNum x
17 // EltVT). These are things like the register class for the writemask, etc.
18 // The idea is to pass one of these as the template argument rather than the
19 // individual arguments.
20 // The template is also used for scalar types, in this case numelts is 1.
21 class X86VectorVTInfo<int numelts, ValueType eltvt, RegisterClass rc,
23 RegisterClass RC = rc;
24 ValueType EltVT = eltvt;
25 int NumElts = numelts;
27 // Corresponding mask register class.
28 RegisterClass KRC = !cast<RegisterClass>("VK" # NumElts);
30 // Corresponding write-mask register class.
31 RegisterClass KRCWM = !cast<RegisterClass>("VK" # NumElts # "WM");
33 // The GPR register class that can hold the write mask. Use GR8 for fewer
34 // than 8 elements. Use shift-right and equal to work around the lack of
37 !cast<RegisterClass>("GR" #
38 !if (!eq (!srl(NumElts, 3), 0), 8, NumElts));
40 // Suffix used in the instruction mnemonic.
41 string Suffix = suffix;
43 // VTName is a string name for vector VT. For vector types it will be
44 // v # NumElts # EltVT, so for vector of 8 elements of i32 it will be v8i32
45 // It is a little bit complex for scalar types, where NumElts = 1.
46 // In this case we build v4f32 or v2f64
47 string VTName = "v" # !if (!eq (NumElts, 1),
48 !if (!eq (EltVT.Size, 32), 4,
49 !if (!eq (EltVT.Size, 64), 2, NumElts)), NumElts) # EltVT;
52 ValueType VT = !cast<ValueType>(VTName);
54 string EltTypeName = !cast<string>(EltVT);
55 // Size of the element type in bits, e.g. 32 for v16i32.
56 string EltSizeName = !subst("i", "", !subst("f", "", EltTypeName));
57 int EltSize = EltVT.Size;
59 // "i" for integer types and "f" for floating-point types
60 string TypeVariantName = !subst(EltSizeName, "", EltTypeName);
62 // Size of RC in bits, e.g. 512 for VR512.
65 // The corresponding memory operand, e.g. i512mem for VR512.
66 X86MemOperand MemOp = !cast<X86MemOperand>(TypeVariantName # Size # "mem");
67 X86MemOperand ScalarMemOp = !cast<X86MemOperand>(EltVT # "mem");
70 // Note: For 128/256-bit integer VT we choose loadv2i64/loadv4i64
71 // due to load promotion during legalization
72 PatFrag LdFrag = !cast<PatFrag>("load" #
73 !if (!eq (TypeVariantName, "i"),
74 !if (!eq (Size, 128), "v2i64",
75 !if (!eq (Size, 256), "v4i64",
78 PatFrag AlignedLdFrag = !cast<PatFrag>("alignedload" #
79 !if (!eq (TypeVariantName, "i"),
80 !if (!eq (Size, 128), "v2i64",
81 !if (!eq (Size, 256), "v4i64",
83 !if (!eq (EltSize, 64), "v8i64", "v16i32"),
86 PatFrag ScalarLdFrag = !cast<PatFrag>("load" # EltVT);
88 // The corresponding float type, e.g. v16f32 for v16i32
89 // Note: For EltSize < 32, FloatVT is illegal and TableGen
90 // fails to compile, so we choose FloatVT = VT
91 ValueType FloatVT = !cast<ValueType>(
92 !if (!eq (!srl(EltSize,5),0),
94 !if (!eq(TypeVariantName, "i"),
95 "v" # NumElts # "f" # EltSize,
98 // The string to specify embedded broadcast in assembly.
99 string BroadcastStr = "{1to" # NumElts # "}";
101 // 8-bit compressed displacement tuple/subvector format. This is only
102 // defined for NumElts <= 8.
103 CD8VForm CD8TupleForm = !if (!eq (!srl(NumElts, 4), 0),
104 !cast<CD8VForm>("CD8VT" # NumElts), ?);
106 SubRegIndex SubRegIdx = !if (!eq (Size, 128), sub_xmm,
107 !if (!eq (Size, 256), sub_ymm, ?));
109 Domain ExeDomain = !if (!eq (EltTypeName, "f32"), SSEPackedSingle,
110 !if (!eq (EltTypeName, "f64"), SSEPackedDouble,
113 RegisterClass FRC = !if (!eq (EltTypeName, "f32"), FR32X, FR64X);
115 // A vector type of the same width with element type i32. This is used to
116 // create the canonical constant zero node ImmAllZerosV.
117 ValueType i32VT = !cast<ValueType>("v" # !srl(Size, 5) # "i32");
118 dag ImmAllZerosV = (VT (bitconvert (i32VT immAllZerosV)));
120 string ZSuffix = !if (!eq (Size, 128), "Z128",
121 !if (!eq (Size, 256), "Z256", "Z"));
// 512-bit vector value-type descriptors; RC = VR512 for all of them.
124 def v64i8_info : X86VectorVTInfo<64, i8, VR512, "b">;
125 def v32i16_info : X86VectorVTInfo<32, i16, VR512, "w">;
126 def v16i32_info : X86VectorVTInfo<16, i32, VR512, "d">;
127 def v8i64_info : X86VectorVTInfo<8, i64, VR512, "q">;
128 def v16f32_info : X86VectorVTInfo<16, f32, VR512, "ps">;
129 def v8f64_info : X86VectorVTInfo<8, f64, VR512, "pd">;
131 // "x" in v32i8x_info means RC = VR256X
132 def v32i8x_info : X86VectorVTInfo<32, i8, VR256X, "b">;
133 def v16i16x_info : X86VectorVTInfo<16, i16, VR256X, "w">;
134 def v8i32x_info : X86VectorVTInfo<8, i32, VR256X, "d">;
135 def v4i64x_info : X86VectorVTInfo<4, i64, VR256X, "q">;
136 def v8f32x_info : X86VectorVTInfo<8, f32, VR256X, "ps">;
137 def v4f64x_info : X86VectorVTInfo<4, f64, VR256X, "pd">;
// 128-bit vector value-type descriptors; RC = VR128X.
139 def v16i8x_info : X86VectorVTInfo<16, i8, VR128X, "b">;
140 def v8i16x_info : X86VectorVTInfo<8, i16, VR128X, "w">;
141 def v4i32x_info : X86VectorVTInfo<4, i32, VR128X, "d">;
142 def v2i64x_info : X86VectorVTInfo<2, i64, VR128X, "q">;
143 def v4f32x_info : X86VectorVTInfo<4, f32, VR128X, "ps">;
144 def v2f64x_info : X86VectorVTInfo<2, f64, VR128X, "pd">;
146 // We map scalar types to the smallest (128-bit) vector type
147 // with the appropriate element type. This allows to use the same masking logic.
148 def f32x_info : X86VectorVTInfo<1, f32, VR128X, "ss">;
149 def f64x_info : X86VectorVTInfo<1, f64, VR128X, "sd">;
151 class AVX512VLVectorVTInfo<X86VectorVTInfo i512, X86VectorVTInfo i256,
152 X86VectorVTInfo i128> {
153 X86VectorVTInfo info512 = i512;
154 X86VectorVTInfo info256 = i256;
155 X86VectorVTInfo info128 = i128;
158 def avx512vl_i8_info : AVX512VLVectorVTInfo<v64i8_info, v32i8x_info,
160 def avx512vl_i16_info : AVX512VLVectorVTInfo<v32i16_info, v16i16x_info,
162 def avx512vl_i32_info : AVX512VLVectorVTInfo<v16i32_info, v8i32x_info,
164 def avx512vl_i64_info : AVX512VLVectorVTInfo<v8i64_info, v4i64x_info,
166 def avx512vl_f32_info : AVX512VLVectorVTInfo<v16f32_info, v8f32x_info,
168 def avx512vl_f64_info : AVX512VLVectorVTInfo<v8f64_info, v4f64x_info,
171 // This multiclass generates the masking variants from the non-masking
172 // variant. It only provides the assembly pieces for the masking variants.
173 // It assumes custom ISel patterns for masking which can be provided as
174 // template arguments.
175 multiclass AVX512_maskable_custom<bits<8> O, Format F,
177 dag Ins, dag MaskingIns, dag ZeroMaskingIns,
179 string AttSrcAsm, string IntelSrcAsm,
181 list<dag> MaskingPattern,
182 list<dag> ZeroMaskingPattern,
184 string MaskingConstraint = "",
185 InstrItinClass itin = NoItinerary,
186 bit IsCommutable = 0> {
187 let isCommutable = IsCommutable in
188 def NAME: AVX512<O, F, Outs, Ins,
189 OpcodeStr#"\t{"#AttSrcAsm#", $dst "#Round#"|"#
190 "$dst "#Round#", "#IntelSrcAsm#"}",
193 // Prefer over VMOV*rrk Pat<>
194 let AddedComplexity = 20 in
195 def NAME#k: AVX512<O, F, Outs, MaskingIns,
196 OpcodeStr#"\t{"#AttSrcAsm#", $dst {${mask}}"#Round#"|"#
197 "$dst {${mask}}"#Round#", "#IntelSrcAsm#"}",
198 MaskingPattern, itin>,
200 // In case of the 3src subclass this is overridden with a let.
201 string Constraints = MaskingConstraint;
203 let AddedComplexity = 30 in // Prefer over VMOV*rrkz Pat<>
204 def NAME#kz: AVX512<O, F, Outs, ZeroMaskingIns,
205 OpcodeStr#"\t{"#AttSrcAsm#", $dst {${mask}} {z}"#Round#"|"#
206 "$dst {${mask}} {z}"#Round#", "#IntelSrcAsm#"}",
213 // Common base class of AVX512_maskable and AVX512_maskable_3src.
214 multiclass AVX512_maskable_common<bits<8> O, Format F, X86VectorVTInfo _,
216 dag Ins, dag MaskingIns, dag ZeroMaskingIns,
218 string AttSrcAsm, string IntelSrcAsm,
219 dag RHS, dag MaskingRHS,
220 SDNode Select = vselect, string Round = "",
221 string MaskingConstraint = "",
222 InstrItinClass itin = NoItinerary,
223 bit IsCommutable = 0> :
224 AVX512_maskable_custom<O, F, Outs, Ins, MaskingIns, ZeroMaskingIns, OpcodeStr,
225 AttSrcAsm, IntelSrcAsm,
226 [(set _.RC:$dst, RHS)],
227 [(set _.RC:$dst, MaskingRHS)],
229 (Select _.KRCWM:$mask, RHS, _.ImmAllZerosV))],
230 Round, MaskingConstraint, NoItinerary, IsCommutable>;
232 // This multiclass generates the unconditional/non-masking, the masking and
233 // the zero-masking variant of the vector instruction. In the masking case, the
234 // perserved vector elements come from a new dummy input operand tied to $dst.
235 multiclass AVX512_maskable<bits<8> O, Format F, X86VectorVTInfo _,
236 dag Outs, dag Ins, string OpcodeStr,
237 string AttSrcAsm, string IntelSrcAsm,
238 dag RHS, string Round = "",
239 InstrItinClass itin = NoItinerary,
240 bit IsCommutable = 0> :
241 AVX512_maskable_common<O, F, _, Outs, Ins,
242 !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
243 !con((ins _.KRCWM:$mask), Ins),
244 OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
245 (vselect _.KRCWM:$mask, RHS, _.RC:$src0), vselect,
246 Round, "$src0 = $dst", itin, IsCommutable>;
248 // This multiclass generates the unconditional/non-masking, the masking and
249 // the zero-masking variant of the scalar instruction.
250 multiclass AVX512_maskable_scalar<bits<8> O, Format F, X86VectorVTInfo _,
251 dag Outs, dag Ins, string OpcodeStr,
252 string AttSrcAsm, string IntelSrcAsm,
253 dag RHS, string Round = "",
254 InstrItinClass itin = NoItinerary,
255 bit IsCommutable = 0> :
256 AVX512_maskable_common<O, F, _, Outs, Ins,
257 !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
258 !con((ins _.KRCWM:$mask), Ins),
259 OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
260 (X86select _.KRCWM:$mask, RHS, _.RC:$src0), X86select,
261 Round, "$src0 = $dst", itin, IsCommutable>;
263 // Similar to AVX512_maskable but in this case one of the source operands
264 // ($src1) is already tied to $dst so we just use that for the preserved
265 // vector elements. NOTE that the NonTiedIns (the ins dag) should exclude
267 multiclass AVX512_maskable_3src<bits<8> O, Format F, X86VectorVTInfo _,
268 dag Outs, dag NonTiedIns, string OpcodeStr,
269 string AttSrcAsm, string IntelSrcAsm,
271 AVX512_maskable_common<O, F, _, Outs,
272 !con((ins _.RC:$src1), NonTiedIns),
273 !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
274 !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
275 OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
276 (vselect _.KRCWM:$mask, RHS, _.RC:$src1)>;
279 multiclass AVX512_maskable_in_asm<bits<8> O, Format F, X86VectorVTInfo _,
282 string AttSrcAsm, string IntelSrcAsm,
284 AVX512_maskable_custom<O, F, Outs, Ins,
285 !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
286 !con((ins _.KRCWM:$mask), Ins),
287 OpcodeStr, AttSrcAsm, IntelSrcAsm, Pattern, [], [], "",
291 // Instruction with mask that puts result in mask register,
292 // like "compare" and "vptest"
293 multiclass AVX512_maskable_custom_cmp<bits<8> O, Format F,
295 dag Ins, dag MaskingIns,
297 string AttSrcAsm, string IntelSrcAsm,
299 list<dag> MaskingPattern,
301 InstrItinClass itin = NoItinerary> {
302 def NAME: AVX512<O, F, Outs, Ins,
303 OpcodeStr#"\t{"#AttSrcAsm#", $dst "#Round#"|"#
304 "$dst "#Round#", "#IntelSrcAsm#"}",
307 def NAME#k: AVX512<O, F, Outs, MaskingIns,
308 OpcodeStr#"\t{"#Round#AttSrcAsm#", $dst {${mask}}|"#
309 "$dst {${mask}}, "#IntelSrcAsm#Round#"}",
310 MaskingPattern, itin>, EVEX_K;
313 multiclass AVX512_maskable_common_cmp<bits<8> O, Format F, X86VectorVTInfo _,
315 dag Ins, dag MaskingIns,
317 string AttSrcAsm, string IntelSrcAsm,
318 dag RHS, dag MaskingRHS,
320 InstrItinClass itin = NoItinerary> :
321 AVX512_maskable_custom_cmp<O, F, Outs, Ins, MaskingIns, OpcodeStr,
322 AttSrcAsm, IntelSrcAsm,
323 [(set _.KRC:$dst, RHS)],
324 [(set _.KRC:$dst, MaskingRHS)],
327 multiclass AVX512_maskable_cmp<bits<8> O, Format F, X86VectorVTInfo _,
328 dag Outs, dag Ins, string OpcodeStr,
329 string AttSrcAsm, string IntelSrcAsm,
330 dag RHS, string Round = "",
331 InstrItinClass itin = NoItinerary> :
332 AVX512_maskable_common_cmp<O, F, _, Outs, Ins,
333 !con((ins _.KRCWM:$mask), Ins),
334 OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
335 (and _.KRCWM:$mask, RHS),
338 multiclass AVX512_maskable_cmp_alt<bits<8> O, Format F, X86VectorVTInfo _,
339 dag Outs, dag Ins, string OpcodeStr,
340 string AttSrcAsm, string IntelSrcAsm> :
341 AVX512_maskable_custom_cmp<O, F, Outs,
342 Ins, !con((ins _.KRCWM:$mask),Ins), OpcodeStr,
343 AttSrcAsm, IntelSrcAsm,
344 [],[],"", NoItinerary>;
346 // Bitcasts between 512-bit vector types. Return the original type since
347 // no instruction is needed for the conversion
348 let Predicates = [HasAVX512] in {
// 512-bit bitconvert patterns: any 512-bit VT can be reinterpreted as any
// other 512-bit VT with no instruction; the source register is returned as-is.
349 def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
350 def : Pat<(v8f64 (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
351 def : Pat<(v8f64 (bitconvert (v32i16 VR512:$src))), (v8f64 VR512:$src)>;
352 def : Pat<(v8f64 (bitconvert (v64i8 VR512:$src))), (v8f64 VR512:$src)>;
353 def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
354 def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))), (v16f32 VR512:$src)>;
355 def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
356 def : Pat<(v16f32 (bitconvert (v32i16 VR512:$src))), (v16f32 VR512:$src)>;
357 def : Pat<(v16f32 (bitconvert (v64i8 VR512:$src))), (v16f32 VR512:$src)>;
358 def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))), (v16f32 VR512:$src)>;
359 def : Pat<(v8i64 (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
360 def : Pat<(v8i64 (bitconvert (v32i16 VR512:$src))), (v8i64 VR512:$src)>;
361 def : Pat<(v8i64 (bitconvert (v64i8 VR512:$src))), (v8i64 VR512:$src)>;
362 def : Pat<(v8i64 (bitconvert (v8f64 VR512:$src))), (v8i64 VR512:$src)>;
363 def : Pat<(v8i64 (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
364 def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))), (v16i32 VR512:$src)>;
365 def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
366 def : Pat<(v16i32 (bitconvert (v32i16 VR512:$src))), (v16i32 VR512:$src)>;
367 def : Pat<(v16i32 (bitconvert (v64i8 VR512:$src))), (v16i32 VR512:$src)>;
368 def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))), (v16i32 VR512:$src)>;
369 def : Pat<(v32i16 (bitconvert (v8i64 VR512:$src))), (v32i16 VR512:$src)>;
370 def : Pat<(v32i16 (bitconvert (v16i32 VR512:$src))), (v32i16 VR512:$src)>;
371 def : Pat<(v32i16 (bitconvert (v64i8 VR512:$src))), (v32i16 VR512:$src)>;
372 def : Pat<(v32i16 (bitconvert (v8f64 VR512:$src))), (v32i16 VR512:$src)>;
373 def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
// NOTE(review): an exact duplicate of the preceding v32i16 <- v16f32 pattern
// appeared here twice in a row; the redundant copy has been removed.
375 def : Pat<(v64i8 (bitconvert (v8i64 VR512:$src))), (v64i8 VR512:$src)>;
376 def : Pat<(v64i8 (bitconvert (v16i32 VR512:$src))), (v64i8 VR512:$src)>;
377 def : Pat<(v64i8 (bitconvert (v32i16 VR512:$src))), (v64i8 VR512:$src)>;
378 def : Pat<(v64i8 (bitconvert (v8f64 VR512:$src))), (v64i8 VR512:$src)>;
379 def : Pat<(v64i8 (bitconvert (v16f32 VR512:$src))), (v64i8 VR512:$src)>;
// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion.
381 def : Pat<(v2i64 (bitconvert (v4i32 VR128X:$src))), (v2i64 VR128X:$src)>;
382 def : Pat<(v2i64 (bitconvert (v8i16 VR128X:$src))), (v2i64 VR128X:$src)>;
383 def : Pat<(v2i64 (bitconvert (v16i8 VR128X:$src))), (v2i64 VR128X:$src)>;
384 def : Pat<(v2i64 (bitconvert (v2f64 VR128X:$src))), (v2i64 VR128X:$src)>;
385 def : Pat<(v2i64 (bitconvert (v4f32 VR128X:$src))), (v2i64 VR128X:$src)>;
386 def : Pat<(v4i32 (bitconvert (v2i64 VR128X:$src))), (v4i32 VR128X:$src)>;
387 def : Pat<(v4i32 (bitconvert (v8i16 VR128X:$src))), (v4i32 VR128X:$src)>;
388 def : Pat<(v4i32 (bitconvert (v16i8 VR128X:$src))), (v4i32 VR128X:$src)>;
389 def : Pat<(v4i32 (bitconvert (v2f64 VR128X:$src))), (v4i32 VR128X:$src)>;
390 def : Pat<(v4i32 (bitconvert (v4f32 VR128X:$src))), (v4i32 VR128X:$src)>;
391 def : Pat<(v8i16 (bitconvert (v2i64 VR128X:$src))), (v8i16 VR128X:$src)>;
392 def : Pat<(v8i16 (bitconvert (v4i32 VR128X:$src))), (v8i16 VR128X:$src)>;
393 def : Pat<(v8i16 (bitconvert (v16i8 VR128X:$src))), (v8i16 VR128X:$src)>;
394 def : Pat<(v8i16 (bitconvert (v2f64 VR128X:$src))), (v8i16 VR128X:$src)>;
395 def : Pat<(v8i16 (bitconvert (v4f32 VR128X:$src))), (v8i16 VR128X:$src)>;
396 def : Pat<(v16i8 (bitconvert (v2i64 VR128X:$src))), (v16i8 VR128X:$src)>;
397 def : Pat<(v16i8 (bitconvert (v4i32 VR128X:$src))), (v16i8 VR128X:$src)>;
398 def : Pat<(v16i8 (bitconvert (v8i16 VR128X:$src))), (v16i8 VR128X:$src)>;
399 def : Pat<(v16i8 (bitconvert (v2f64 VR128X:$src))), (v16i8 VR128X:$src)>;
400 def : Pat<(v16i8 (bitconvert (v4f32 VR128X:$src))), (v16i8 VR128X:$src)>;
401 def : Pat<(v4f32 (bitconvert (v2i64 VR128X:$src))), (v4f32 VR128X:$src)>;
402 def : Pat<(v4f32 (bitconvert (v4i32 VR128X:$src))), (v4f32 VR128X:$src)>;
403 def : Pat<(v4f32 (bitconvert (v8i16 VR128X:$src))), (v4f32 VR128X:$src)>;
404 def : Pat<(v4f32 (bitconvert (v16i8 VR128X:$src))), (v4f32 VR128X:$src)>;
405 def : Pat<(v4f32 (bitconvert (v2f64 VR128X:$src))), (v4f32 VR128X:$src)>;
406 def : Pat<(v2f64 (bitconvert (v2i64 VR128X:$src))), (v2f64 VR128X:$src)>;
407 def : Pat<(v2f64 (bitconvert (v4i32 VR128X:$src))), (v2f64 VR128X:$src)>;
408 def : Pat<(v2f64 (bitconvert (v8i16 VR128X:$src))), (v2f64 VR128X:$src)>;
409 def : Pat<(v2f64 (bitconvert (v16i8 VR128X:$src))), (v2f64 VR128X:$src)>;
410 def : Pat<(v2f64 (bitconvert (v4f32 VR128X:$src))), (v2f64 VR128X:$src)>;
412 // Bitcasts between 256-bit vector types. Return the original type since
413 // no instruction is needed for the conversion
// (each of the six 256-bit VTs converts to each of the other five).
414 def : Pat<(v4f64 (bitconvert (v8f32 VR256X:$src))), (v4f64 VR256X:$src)>;
415 def : Pat<(v4f64 (bitconvert (v8i32 VR256X:$src))), (v4f64 VR256X:$src)>;
416 def : Pat<(v4f64 (bitconvert (v4i64 VR256X:$src))), (v4f64 VR256X:$src)>;
417 def : Pat<(v4f64 (bitconvert (v16i16 VR256X:$src))), (v4f64 VR256X:$src)>;
418 def : Pat<(v4f64 (bitconvert (v32i8 VR256X:$src))), (v4f64 VR256X:$src)>;
419 def : Pat<(v8f32 (bitconvert (v8i32 VR256X:$src))), (v8f32 VR256X:$src)>;
420 def : Pat<(v8f32 (bitconvert (v4i64 VR256X:$src))), (v8f32 VR256X:$src)>;
421 def : Pat<(v8f32 (bitconvert (v4f64 VR256X:$src))), (v8f32 VR256X:$src)>;
422 def : Pat<(v8f32 (bitconvert (v32i8 VR256X:$src))), (v8f32 VR256X:$src)>;
423 def : Pat<(v8f32 (bitconvert (v16i16 VR256X:$src))), (v8f32 VR256X:$src)>;
424 def : Pat<(v4i64 (bitconvert (v8f32 VR256X:$src))), (v4i64 VR256X:$src)>;
425 def : Pat<(v4i64 (bitconvert (v8i32 VR256X:$src))), (v4i64 VR256X:$src)>;
426 def : Pat<(v4i64 (bitconvert (v4f64 VR256X:$src))), (v4i64 VR256X:$src)>;
427 def : Pat<(v4i64 (bitconvert (v32i8 VR256X:$src))), (v4i64 VR256X:$src)>;
428 def : Pat<(v4i64 (bitconvert (v16i16 VR256X:$src))), (v4i64 VR256X:$src)>;
429 def : Pat<(v32i8 (bitconvert (v4f64 VR256X:$src))), (v32i8 VR256X:$src)>;
430 def : Pat<(v32i8 (bitconvert (v4i64 VR256X:$src))), (v32i8 VR256X:$src)>;
431 def : Pat<(v32i8 (bitconvert (v8f32 VR256X:$src))), (v32i8 VR256X:$src)>;
432 def : Pat<(v32i8 (bitconvert (v8i32 VR256X:$src))), (v32i8 VR256X:$src)>;
433 def : Pat<(v32i8 (bitconvert (v16i16 VR256X:$src))), (v32i8 VR256X:$src)>;
434 def : Pat<(v8i32 (bitconvert (v32i8 VR256X:$src))), (v8i32 VR256X:$src)>;
435 def : Pat<(v8i32 (bitconvert (v16i16 VR256X:$src))), (v8i32 VR256X:$src)>;
436 def : Pat<(v8i32 (bitconvert (v8f32 VR256X:$src))), (v8i32 VR256X:$src)>;
437 def : Pat<(v8i32 (bitconvert (v4i64 VR256X:$src))), (v8i32 VR256X:$src)>;
438 def : Pat<(v8i32 (bitconvert (v4f64 VR256X:$src))), (v8i32 VR256X:$src)>;
439 def : Pat<(v16i16 (bitconvert (v8f32 VR256X:$src))), (v16i16 VR256X:$src)>;
440 def : Pat<(v16i16 (bitconvert (v8i32 VR256X:$src))), (v16i16 VR256X:$src)>;
441 def : Pat<(v16i16 (bitconvert (v4i64 VR256X:$src))), (v16i16 VR256X:$src)>;
442 def : Pat<(v16i16 (bitconvert (v4f64 VR256X:$src))), (v16i16 VR256X:$src)>;
443 def : Pat<(v16i16 (bitconvert (v32i8 VR256X:$src))), (v16i16 VR256X:$src)>;
447 // AVX-512: VPXOR instruction writes zero to its upper part, it's safe build zeros.
450 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
451 isPseudo = 1, Predicates = [HasAVX512] in {
452 def AVX512_512_SET0 : I<0, Pseudo, (outs VR512:$dst), (ins), "",
453 [(set VR512:$dst, (v16f32 immAllZerosV))]>;
456 let Predicates = [HasAVX512] in {
// Reuse the v16f32 zeroing pseudo (AVX512_512_SET0, defined above) for the
// remaining 512-bit all-zeros vector types.
457 def : Pat<(v8i64 immAllZerosV), (AVX512_512_SET0)>;
458 def : Pat<(v16i32 immAllZerosV), (AVX512_512_SET0)>;
459 def : Pat<(v8f64 immAllZerosV), (AVX512_512_SET0)>;
462 //===----------------------------------------------------------------------===//
463 // AVX-512 - VECTOR INSERT
466 multiclass vinsert_for_size_no_alt<int Opcode,
467 X86VectorVTInfo From, X86VectorVTInfo To,
468 PatFrag vinsert_insert,
469 SDNodeXForm INSERT_get_vinsert_imm> {
470 let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
471 def rr : AVX512AIi8<Opcode, MRMSrcReg, (outs VR512:$dst),
472 (ins VR512:$src1, From.RC:$src2, u8imm:$src3),
473 "vinsert" # From.EltTypeName # "x" # From.NumElts #
474 "\t{$src3, $src2, $src1, $dst|"
475 "$dst, $src1, $src2, $src3}",
476 [(set To.RC:$dst, (vinsert_insert:$src3 (To.VT VR512:$src1),
477 (From.VT From.RC:$src2),
482 def rm : AVX512AIi8<Opcode, MRMSrcMem, (outs VR512:$dst),
483 (ins VR512:$src1, From.MemOp:$src2, u8imm:$src3),
484 "vinsert" # From.EltTypeName # "x" # From.NumElts #
485 "\t{$src3, $src2, $src1, $dst|"
486 "$dst, $src1, $src2, $src3}",
488 EVEX_4V, EVEX_V512, EVEX_CD8<From.EltSize, From.CD8TupleForm>;
492 multiclass vinsert_for_size<int Opcode,
493 X86VectorVTInfo From, X86VectorVTInfo To,
494 X86VectorVTInfo AltFrom, X86VectorVTInfo AltTo,
495 PatFrag vinsert_insert,
496 SDNodeXForm INSERT_get_vinsert_imm> :
497 vinsert_for_size_no_alt<Opcode, From, To,
498 vinsert_insert, INSERT_get_vinsert_imm> {
499 // Codegen pattern with the alternative types, e.g. v2i64 -> v8i64 for
500 // vinserti32x4. Only add this if 64x2 and friends are not supported
501 // natively via AVX512DQ.
502 let Predicates = [NoDQI] in
503 def : Pat<(vinsert_insert:$ins
504 (AltTo.VT VR512:$src1), (AltFrom.VT From.RC:$src2), (iPTR imm)),
505 (AltTo.VT (!cast<Instruction>(NAME # From.EltSize # "x4rr")
506 VR512:$src1, From.RC:$src2,
507 (INSERT_get_vinsert_imm VR512:$ins)))>;
510 multiclass vinsert_for_type<ValueType EltVT32, int Opcode128,
511 ValueType EltVT64, int Opcode256> {
512 defm NAME # "32x4" : vinsert_for_size<Opcode128,
513 X86VectorVTInfo< 4, EltVT32, VR128X>,
514 X86VectorVTInfo<16, EltVT32, VR512>,
515 X86VectorVTInfo< 2, EltVT64, VR128X>,
516 X86VectorVTInfo< 8, EltVT64, VR512>,
518 INSERT_get_vinsert128_imm>;
519 let Predicates = [HasDQI] in
520 defm NAME # "64x2" : vinsert_for_size_no_alt<Opcode128,
521 X86VectorVTInfo< 2, EltVT64, VR128X>,
522 X86VectorVTInfo< 8, EltVT64, VR512>,
524 INSERT_get_vinsert128_imm>, VEX_W;
525 defm NAME # "64x4" : vinsert_for_size<Opcode256,
526 X86VectorVTInfo< 4, EltVT64, VR256X>,
527 X86VectorVTInfo< 8, EltVT64, VR512>,
528 X86VectorVTInfo< 8, EltVT32, VR256>,
529 X86VectorVTInfo<16, EltVT32, VR512>,
531 INSERT_get_vinsert256_imm>, VEX_W;
532 let Predicates = [HasDQI] in
533 defm NAME # "32x8" : vinsert_for_size_no_alt<Opcode256,
534 X86VectorVTInfo< 8, EltVT32, VR256X>,
535 X86VectorVTInfo<16, EltVT32, VR512>,
537 INSERT_get_vinsert256_imm>;
// FP inserts use opcodes 0x18 (32x4) / 0x1a (64x4); integer inserts use
// 0x38 (32x4) / 0x3a (64x4).
540 defm VINSERTF : vinsert_for_type<f32, 0x18, f64, 0x1a>;
541 defm VINSERTI : vinsert_for_type<i32, 0x38, i64, 0x3a>;
543 // vinsertps - insert f32 to XMM
544 def VINSERTPSzrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
545 (ins VR128X:$src1, VR128X:$src2, u8imm:$src3),
546 "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
547 [(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
// Memory form of vinsertps: folds a scalar f32 load into the insert, with the
// insertion position/zeroing controlled by the $src3 immediate.
549 def VINSERTPSzrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
550 (ins VR128X:$src1, f32mem:$src2, u8imm:$src3),
551 "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
552 [(set VR128X:$dst, (X86insertps VR128X:$src1,
553 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
554 imm:$src3))]>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
556 //===----------------------------------------------------------------------===//
557 // AVX-512 VECTOR EXTRACT
560 multiclass vextract_for_size<int Opcode,
561 X86VectorVTInfo From, X86VectorVTInfo To,
562 X86VectorVTInfo AltFrom, X86VectorVTInfo AltTo,
563 PatFrag vextract_extract,
564 SDNodeXForm EXTRACT_get_vextract_imm> {
565 let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
566 defm rr : AVX512_maskable_in_asm<Opcode, MRMDestReg, To, (outs To.RC:$dst),
567 (ins VR512:$src1, u8imm:$idx),
568 "vextract" # To.EltTypeName # "x4",
569 "$idx, $src1", "$src1, $idx",
570 [(set To.RC:$dst, (vextract_extract:$idx (From.VT VR512:$src1),
572 AVX512AIi8Base, EVEX, EVEX_V512;
574 def rm : AVX512AIi8<Opcode, MRMDestMem, (outs),
575 (ins To.MemOp:$dst, VR512:$src1, u8imm:$src2),
576 "vextract" # To.EltTypeName # "x4\t{$src2, $src1, $dst|"
577 "$dst, $src1, $src2}",
578 []>, EVEX, EVEX_V512, EVEX_CD8<To.EltSize, CD8VT4>;
581 // Codegen pattern with the alternative types, e.g. v8i64 -> v2i64 for
583 def : Pat<(vextract_extract:$ext (AltFrom.VT VR512:$src1), (iPTR imm)),
584 (AltTo.VT (!cast<Instruction>(NAME # To.EltSize # "x4rr")
586 (EXTRACT_get_vextract_imm To.RC:$ext)))>;
588 // A 128/256-bit subvector extract from the first 512-bit vector position is
589 // a subregister copy that needs no instruction.
590 def : Pat<(To.VT (extract_subvector (From.VT VR512:$src), (iPTR 0))),
592 (EXTRACT_SUBREG (From.VT VR512:$src), To.SubRegIdx))>;
594 // And for the alternative types.
595 def : Pat<(AltTo.VT (extract_subvector (AltFrom.VT VR512:$src), (iPTR 0))),
597 (EXTRACT_SUBREG (AltFrom.VT VR512:$src), AltTo.SubRegIdx))>;
599 // Intrinsic call with masking.
600 def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
602 VR512:$src1, (iPTR imm:$idx), To.RC:$src0, GR8:$mask),
603 (!cast<Instruction>(NAME # To.EltSize # "x4rrk") To.RC:$src0,
604 (v4i1 (COPY_TO_REGCLASS GR8:$mask, VK4WM)),
605 VR512:$src1, imm:$idx)>;
607 // Intrinsic call with zero-masking.
608 def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
610 VR512:$src1, (iPTR imm:$idx), To.ImmAllZerosV, GR8:$mask),
611 (!cast<Instruction>(NAME # To.EltSize # "x4rrkz")
612 (v4i1 (COPY_TO_REGCLASS GR8:$mask, VK4WM)),
613 VR512:$src1, imm:$idx)>;
615 // Intrinsic call without masking.
616 def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
618 VR512:$src1, (iPTR imm:$idx), To.ImmAllZerosV, (i8 -1)),
619 (!cast<Instruction>(NAME # To.EltSize # "x4rr")
620 VR512:$src1, imm:$idx)>;
623 multiclass vextract_for_type<ValueType EltVT32, int Opcode32,
624 ValueType EltVT64, int Opcode64> {
625 defm NAME # "32x4" : vextract_for_size<Opcode32,
626 X86VectorVTInfo<16, EltVT32, VR512>,
627 X86VectorVTInfo< 4, EltVT32, VR128X>,
628 X86VectorVTInfo< 8, EltVT64, VR512>,
629 X86VectorVTInfo< 2, EltVT64, VR128X>,
631 EXTRACT_get_vextract128_imm>;
632 defm NAME # "64x4" : vextract_for_size<Opcode64,
633 X86VectorVTInfo< 8, EltVT64, VR512>,
634 X86VectorVTInfo< 4, EltVT64, VR256X>,
635 X86VectorVTInfo<16, EltVT32, VR512>,
636 X86VectorVTInfo< 8, EltVT32, VR256>,
638 EXTRACT_get_vextract256_imm>, VEX_W;
// FP extracts use opcodes 0x19 (32x4) / 0x1b (64x4); integer extracts use
// 0x39 (32x4) / 0x3b (64x4).
641 defm VEXTRACTF : vextract_for_type<f32, 0x19, f64, 0x1b>;
642 defm VEXTRACTI : vextract_for_type<i32, 0x39, i64, 0x3b>;
644 // A 128-bit subvector insert to the first 512-bit vector position
645 // is a subregister copy that needs no instruction.
646 def : Pat<(insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)),
647 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
648 (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
650 def : Pat<(insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0)),
651 (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
652 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
654 def : Pat<(insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0)),
655 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
656 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
658 def : Pat<(insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0)),
659 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
660 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
// A 256-bit subvector insert at position 0 of a 512-bit vector is just a
// sub_ymm subregister copy into an otherwise-undef register; no instruction
// is emitted.
663 def : Pat<(insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0)),
664 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
665 def : Pat<(insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0)),
666 (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
667 def : Pat<(insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0)),
668 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
669 def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
670 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
672 // vextractps - extract 32 bits from XMM
673 def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
674 (ins VR128X:$src1, u8imm:$src2),
675 "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
676 [(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
// Memory form of vextractps: stores the 32-bit element selected by $src2
// directly to the destination address.
679 def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
680 (ins f32mem:$dst, VR128X:$src1, u8imm:$src2),
681 "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
682 [(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
683 addr:$dst)]>, EVEX, EVEX_CD8<32, CD8VT1>;
685 //===---------------------------------------------------------------------===//
688 multiclass avx512_fp_broadcast<bits<8> opc, SDNode OpNode, RegisterClass SrcRC,
689 ValueType svt, X86VectorVTInfo _> {
690 defm r : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
691 (ins SrcRC:$src), "vbroadcast"## !subst("p", "s", _.Suffix),
692 "$src", "$src", (_.VT (OpNode (svt SrcRC:$src)))>,
696 defm m : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
697 (ins _.ScalarMemOp:$src),
698 "vbroadcast"##!subst("p", "s", _.Suffix), "$src", "$src",
699 (_.VT (OpNode (_.ScalarLdFrag addr:$src)))>,
704 multiclass avx512_fp_broadcast_vl<bits<8> opc, SDNode OpNode,
705 AVX512VLVectorVTInfo _> {
706 defm Z : avx512_fp_broadcast<opc, OpNode, VR128X, _.info128.VT, _.info512>,
709 let Predicates = [HasVLX] in {
710 defm Z256 : avx512_fp_broadcast<opc, OpNode, VR128X, _.info128.VT, _.info256>,
715 let ExeDomain = SSEPackedSingle in {
716 defm VBROADCASTSS : avx512_fp_broadcast_vl<0x18, X86VBroadcast,
717 avx512vl_f32_info>, EVEX_CD8<32, CD8VT1>;
718 let Predicates = [HasVLX] in {
719 defm VBROADCASTSSZ128 : avx512_fp_broadcast<0x18, X86VBroadcast, VR128X,
720 v4f32, v4f32x_info>, EVEX_V128,
721 EVEX_CD8<32, CD8VT1>;
725 let ExeDomain = SSEPackedDouble in {
726 defm VBROADCASTSD : avx512_fp_broadcast_vl<0x19, X86VBroadcast,
727 avx512vl_f64_info>, VEX_W, EVEX_CD8<64, CD8VT1>;
730 // avx512_broadcast_pat introduces patterns for broadcast with a scalar argument.
731 // Later, we can canonize broadcast instructions before ISel phase and
732 // eliminate additional patterns on ISel.
733 // SrcRC_v and SrcRC_s are RegisterClasses for vector and scalar
734 // representations of source
// avx512_broadcast_pat: selection patterns mapping scalar-register-source
// broadcasts onto the existing <InstName>r / rk / rkz instructions; the
// scalar is first copied into the vector register class SrcRC_v.
735 multiclass avx512_broadcast_pat<string InstName, SDNode OpNode,
736 X86VectorVTInfo _, RegisterClass SrcRC_v,
737 RegisterClass SrcRC_s> {
// Unmasked broadcast from a scalar register.
738 def : Pat<(_.VT (OpNode (_.EltVT SrcRC_s:$src))),
739 (!cast<Instruction>(InstName##"r")
740 (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
// Raised complexity so these masked forms win over generic vselect selection.
742 let AddedComplexity = 30 in {
743 def : Pat<(_.VT (vselect _.KRCWM:$mask,
744 (OpNode (_.EltVT SrcRC_s:$src)), _.RC:$src0)),
745 (!cast<Instruction>(InstName##"rk") _.RC:$src0, _.KRCWM:$mask,
746 (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
748 def : Pat<(_.VT(vselect _.KRCWM:$mask,
749 (OpNode (_.EltVT SrcRC_s:$src)), _.ImmAllZerosV)),
750 (!cast<Instruction>(InstName##"rkz") _.KRCWM:$mask,
751 (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
// Instantiations: 512-bit always, 256/128-bit under VLX.
// NOTE(review): the trailing SrcRC arguments of the first two defms fall on
// elided lines of this listing.
755 defm : avx512_broadcast_pat<"VBROADCASTSSZ", X86VBroadcast, v16f32_info,
757 defm : avx512_broadcast_pat<"VBROADCASTSDZ", X86VBroadcast, v8f64_info,
760 let Predicates = [HasVLX] in {
761 defm : avx512_broadcast_pat<"VBROADCASTSSZ256", X86VBroadcast,
762 v8f32x_info, VR128X, FR32X>;
763 defm : avx512_broadcast_pat<"VBROADCASTSSZ128", X86VBroadcast,
764 v4f32x_info, VR128X, FR32X>;
765 defm : avx512_broadcast_pat<"VBROADCASTSDZ256", X86VBroadcast,
766 v4f64x_info, VR128X, FR64X>;
// Memory-source broadcasts and the legacy 512-bit broadcast intrinsics both
// select the memory-form instructions.
769 def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))),
770 (VBROADCASTSSZm addr:$src)>;
771 def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))),
772 (VBROADCASTSDZm addr:$src)>;
774 def : Pat<(int_x86_avx512_vbroadcast_ss_512 addr:$src),
775 (VBROADCASTSSZm addr:$src)>;
776 def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
777 (VBROADCASTSDZm addr:$src)>;
// avx512_int_broadcast_reg: GPR-to-vector integer broadcast
// (vpbroadcastb/w/d/q from a general-purpose register). Asm-only here: the
// pattern list is empty; selection goes through the Pat<>s below.
779 multiclass avx512_int_broadcast_reg<bits<8> opc, X86VectorVTInfo _,
780 RegisterClass SrcRC> {
781 defm r : AVX512_maskable_in_asm<opc, MRMSrcReg, _, (outs _.RC:$dst),
782 (ins SrcRC:$src), "vpbroadcast"##_.Suffix,
783 "$src", "$src", []>, T8PD, EVEX;
// Width wrapper: Z under `prd`; Z256/Z128 additionally require VLX.
786 multiclass avx512_int_broadcast_reg_vl<bits<8> opc, AVX512VLVectorVTInfo _,
787 RegisterClass SrcRC, Predicate prd> {
788 let Predicates = [prd] in
789 defm Z : avx512_int_broadcast_reg<opc, _.info512, SrcRC>, EVEX_V512;
790 let Predicates = [prd, HasVLX] in {
791 defm Z256 : avx512_int_broadcast_reg<opc, _.info256, SrcRC>, EVEX_V256;
792 defm Z128 : avx512_int_broadcast_reg<opc, _.info128, SrcRC>, EVEX_V128;
// B/W use opcodes 0x7A/0x7B from GR32; D and Q share 0x7C (Q is distinguished
// by the W bit and GR64 source - the trailing predicate/modifier arguments
// fall on lines elided from this listing).
796 defm VPBROADCASTBr : avx512_int_broadcast_reg_vl<0x7A, avx512vl_i8_info, GR32,
798 defm VPBROADCASTWr : avx512_int_broadcast_reg_vl<0x7B, avx512vl_i16_info, GR32,
800 defm VPBROADCASTDr : avx512_int_broadcast_reg_vl<0x7C, avx512vl_i32_info, GR32,
802 defm VPBROADCASTQr : avx512_int_broadcast_reg_vl<0x7C, avx512vl_i64_info, GR64,
// Mask zero-extension to vector: broadcast constant 1 under the writemask so
// selected lanes become 1 and masked-off lanes are zeroed (rkz form).
805 def : Pat <(v16i32 (X86vzext VK16WM:$mask)),
806 (VPBROADCASTDrZrkz VK16WM:$mask, (i32 (MOV32ri 0x1)))>;
808 def : Pat <(v8i64 (X86vzext VK8WM:$mask)),
809 (VPBROADCASTQrZrkz VK8WM:$mask, (i64 (MOV64ri 0x1)))>;
811 def : Pat<(v16i32 (X86VBroadcast (i32 GR32:$src))),
812 (VPBROADCASTDrZr GR32:$src)>;
813 def : Pat<(v8i64 (X86VBroadcast (i64 GR64:$src))),
814 (VPBROADCASTQrZr GR64:$src)>;
816 def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_i32_512 (i32 GR32:$src))),
817 (VPBROADCASTDrZr GR32:$src)>;
818 def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_i64_512 (i64 GR64:$src))),
819 (VPBROADCASTQrZr GR64:$src)>;
// Masked-intrinsic forms with an all-zeros passthru select the rkz variants;
// the GPR mask is first copied into the corresponding mask register class.
821 def : Pat<(v16i32 (int_x86_avx512_mask_pbroadcast_d_gpr_512 (i32 GR32:$src),
822 (v16i32 immAllZerosV), (i16 GR16:$mask))),
823 (VPBROADCASTDrZrkz (COPY_TO_REGCLASS GR16:$mask, VK16WM), GR32:$src)>;
824 def : Pat<(v8i64 (int_x86_avx512_mask_pbroadcast_q_gpr_512 (i64 GR64:$src),
825 (bc_v8i64 (v16i32 immAllZerosV)), (i8 GR8:$mask))),
826 (VPBROADCASTQrZrkz (COPY_TO_REGCLASS GR8:$mask, VK8WM), GR64:$src)>;
// avx512_int_broadcast_rm: xmm- and memory-sourced integer broadcasts with
// plain (rr/rm), merge-masked (rrk/rmk) and zero-masked (rrkz/rmkz) variants.
// NOTE(review): several operand/pattern lines are elided from this listing
// (e.g. the KRC parameter declaration and the rrk/rrkz pattern bodies).
828 multiclass avx512_int_broadcast_rm<bits<8> opc, string OpcodeStr,
829 X86MemOperand x86memop, PatFrag ld_frag,
830 RegisterClass DstRC, ValueType OpVT, ValueType SrcVT,
832 def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins VR128X:$src),
833 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
835 (OpVT (X86VBroadcast (SrcVT VR128X:$src))))]>, EVEX;
836 def rrk : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins KRC:$mask,
838 !strconcat(OpcodeStr,
839 "\t{$src, ${dst} {${mask}} |${dst} {${mask}}, $src}"),
841 def rrkz : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins KRC:$mask,
843 !strconcat(OpcodeStr,
844 "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
847 def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
848 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
850 (OpVT (X86VBroadcast (ld_frag addr:$src))))]>, EVEX;
851 def rmk : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins KRC:$mask,
853 !strconcat(OpcodeStr,
854 "\t{$src, ${dst} {${mask}}|${dst} {${mask}} , $src}"),
// Zero-masked memory form: masked-off lanes come from an all-zeros vector.
856 def rmkz : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins KRC:$mask,
858 !strconcat(OpcodeStr,
859 "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
860 [(set DstRC:$dst, (OpVT (vselect KRC:$mask,
861 (X86VBroadcast (ld_frag addr:$src)),
862 (OpVT (bitconvert (v16i32 immAllZerosV))))))]>, EVEX, EVEX_KZ;
// 512-bit dword/qword broadcast instantiations (tuple type CD8VT1).
866 defm VPBROADCASTDZ : avx512_int_broadcast_rm<0x58, "vpbroadcastd", i32mem,
867 loadi32, VR512, v16i32, v4i32, VK16WM>,
868 EVEX_V512, EVEX_CD8<32, CD8VT1>;
869 defm VPBROADCASTQZ : avx512_int_broadcast_rm<0x59, "vpbroadcastq", i64mem,
870 loadi64, VR512, v8i64, v2i64, VK8WM>, EVEX_V512, VEX_W,
871 EVEX_CD8<64, CD8VT1>;
// avx512_int_subvec_broadcast_rm: broadcast a 128/256-bit memory subvector
// into zmm (vbroadcasti32x4 / vbroadcasti64x4); krm is the zero-masked form.
// NOTE(review): the pattern bodies of rm/krm fall on elided lines.
873 multiclass avx512_int_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
874 X86MemOperand x86memop, PatFrag ld_frag,
877 def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins x86memop:$src),
878 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
880 def krm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins KRC:$mask,
882 !strconcat(OpcodeStr,
883 "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
888 defm VBROADCASTI32X4 : avx512_int_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
889 i128mem, loadv2i64, VK16WM>,
890 EVEX_V512, EVEX_CD8<32, CD8VT4>;
891 defm VBROADCASTI64X4 : avx512_int_subvec_broadcast_rm<0x5b, "vbroadcasti64x4",
892 i256mem, loadv4i64, VK16WM>, VEX_W,
893 EVEX_V512, EVEX_CD8<64, CD8VT4>;
// Register-source broadcast intrinsics select the rr instruction forms.
895 def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_512 (v4i32 VR128X:$src))),
896 (VPBROADCASTDZrr VR128X:$src)>;
897 def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_512 (v2i64 VR128X:$src))),
898 (VPBROADCASTQZrr VR128X:$src)>;
// Broadcasting a full zmm re-broadcasts its low xmm (sub_xmm extract).
900 def : Pat<(v16f32 (X86VBroadcast (v16f32 VR512:$src))),
901 (VBROADCASTSSZr (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
902 def : Pat<(v8f64 (X86VBroadcast (v8f64 VR512:$src))),
903 (VBROADCASTSDZr (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;
905 def : Pat<(v16i32 (X86VBroadcast (v16i32 VR512:$src))),
906 (VPBROADCASTDZrr (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm))>;
907 def : Pat<(v8i64 (X86VBroadcast (v8i64 VR512:$src))),
908 (VPBROADCASTQZrr (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm))>;
910 def : Pat<(v16f32 (int_x86_avx512_vbroadcast_ss_ps_512 (v4f32 VR128X:$src))),
911 (VBROADCASTSSZr VR128X:$src)>;
912 def : Pat<(v8f64 (int_x86_avx512_vbroadcast_sd_pd_512 (v2f64 VR128X:$src))),
913 (VBROADCASTSDZr VR128X:$src)>;
915 // Provide fallback in case the load node that is used in the patterns above
916 // is used by additional users, which prevents the pattern selection.
917 def : Pat<(v16f32 (X86VBroadcast FR32X:$src)),
918 (VBROADCASTSSZr (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
919 def : Pat<(v8f64 (X86VBroadcast FR64X:$src)),
920 (VBROADCASTSDZr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
923 //===----------------------------------------------------------------------===//
924 // AVX-512 BROADCAST MASK TO VECTOR REGISTER
// avx512_mask_broadcast: vpbroadcastmw2d / vpbroadcastmb2q - broadcast a mask
// register into vector lanes. CDI-only; 128/256-bit forms also require VLX.
// Pattern lists are empty: these defs provide only asm/encoding.
927 multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
929 let Predicates = [HasCDI] in
930 def Zrr : AVX512XS8I<opc, MRMSrcReg, (outs VR512:$dst), (ins KRC:$src),
931 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
932 []>, EVEX, EVEX_V512;
934 let Predicates = [HasCDI, HasVLX] in {
935 def Z128rr : AVX512XS8I<opc, MRMSrcReg, (outs VR128:$dst), (ins KRC:$src),
936 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
937 []>, EVEX, EVEX_V128;
938 def Z256rr : AVX512XS8I<opc, MRMSrcReg, (outs VR256:$dst), (ins KRC:$src),
939 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
940 []>, EVEX, EVEX_V256;
// NOTE(review): the trailing KRC arguments of these defms are on elided lines.
944 let Predicates = [HasCDI] in {
945 defm VPBROADCASTMW2D : avx512_mask_broadcast<0x3A, "vpbroadcastmw2d",
947 defm VPBROADCASTMB2Q : avx512_mask_broadcast<0x2A, "vpbroadcastmb2q",
951 //===----------------------------------------------------------------------===//
954 // -- immediate form --
// avx512_perm_imm: immediate-controlled permutes (vpermq/vpermpd and the
// vpermilps/pd immediate forms). ri = register source, mi = memory source;
// the 8-bit control is the second operand.
955 multiclass avx512_perm_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
957 let ExeDomain = _.ExeDomain in {
958 def ri : AVX512AIi8<opc, MRMSrcReg, (outs _.RC:$dst),
959 (ins _.RC:$src1, u8imm:$src2),
960 !strconcat(OpcodeStr,
961 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
963 (_.VT (OpNode _.RC:$src1, (i8 imm:$src2))))]>,
965 def mi : AVX512AIi8<opc, MRMSrcMem, (outs _.RC:$dst),
966 (ins _.MemOp:$src1, u8imm:$src2),
967 !strconcat(OpcodeStr,
968 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
970 (_.VT (OpNode (_.LdFrag addr:$src1),
972 EVEX, EVEX_CD8<_.EltSize, CD8VF>;
// avx512_permil: combines the immediate form (via the base multiclass) with
// the variable-control vpermilps/pd, whose control vector comes from Ctrl.RC
// or memory.
976 multiclass avx512_permil<bits<8> OpcImm, bits<8> OpcVar, X86VectorVTInfo _,
977 X86VectorVTInfo Ctrl> :
978 avx512_perm_imm<OpcImm, "vpermil" # _.Suffix, X86VPermilpi, _> {
979 let ExeDomain = _.ExeDomain in {
980 def rr : AVX5128I<OpcVar, MRMSrcReg, (outs _.RC:$dst),
981 (ins _.RC:$src1, _.RC:$src2),
982 !strconcat("vpermil" # _.Suffix,
983 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
985 (_.VT (X86VPermilpv _.RC:$src1,
986 (Ctrl.VT Ctrl.RC:$src2))))]>,
988 def rm : AVX5128I<OpcVar, MRMSrcMem, (outs _.RC:$dst),
989 (ins _.RC:$src1, Ctrl.MemOp:$src2),
990 !strconcat("vpermil" # _.Suffix,
991 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
993 (_.VT (X86VPermilpv _.RC:$src1,
994 (Ctrl.VT (Ctrl.LdFrag addr:$src2)))))]>,
// 512-bit instantiations, plus integer-typed fallbacks that reuse the FP
// vpermilps/pd instructions for integer vector types.
999 defm VPERMQZ : avx512_perm_imm<0x00, "vpermq", X86VPermi, v8i64_info>,
1001 defm VPERMPDZ : avx512_perm_imm<0x01, "vpermpd", X86VPermi, v8f64_info>,
1004 defm VPERMILPSZ : avx512_permil<0x04, 0x0C, v16f32_info, v16i32_info>,
1006 defm VPERMILPDZ : avx512_permil<0x05, 0x0D, v8f64_info, v8i64_info>,
1009 def : Pat<(v16i32 (X86VPermilpi VR512:$src1, (i8 imm:$imm))),
1010 (VPERMILPSZri VR512:$src1, imm:$imm)>;
1011 def : Pat<(v8i64 (X86VPermilpi VR512:$src1, (i8 imm:$imm))),
1012 (VPERMILPDZri VR512:$src1, imm:$imm)>;
1014 // -- VPERM - register form --
// avx512_perm: variable permutes with a full-width control vector
// (vpermd/q/ps/pd). rr = register data operand, rm = memory data operand;
// $src1 is the control vector (X86VPermv).
1015 multiclass avx512_perm<bits<8> opc, string OpcodeStr, RegisterClass RC,
1016 PatFrag mem_frag, X86MemOperand x86memop, ValueType OpVT> {
1018 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
1019 (ins RC:$src1, RC:$src2),
1020 !strconcat(OpcodeStr,
1021 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1023 (OpVT (X86VPermv RC:$src1, RC:$src2)))]>, EVEX_4V;
1025 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
1026 (ins RC:$src1, x86memop:$src2),
1027 !strconcat(OpcodeStr,
1028 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1030 (OpVT (X86VPermv RC:$src1, (mem_frag addr:$src2))))]>,
// 512-bit instantiations: integer (0x36) and FP (0x16) variants share their
// opcode between 32- and 64-bit elements, selected by VEX_W.
1034 defm VPERMDZ : avx512_perm<0x36, "vpermd", VR512, loadv16i32, i512mem,
1035 v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
1036 defm VPERMQZ : avx512_perm<0x36, "vpermq", VR512, loadv8i64, i512mem,
1037 v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
1038 let ExeDomain = SSEPackedSingle in
1039 defm VPERMPSZ : avx512_perm<0x16, "vpermps", VR512, loadv16f32, f512mem,
1040 v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
1041 let ExeDomain = SSEPackedDouble in
1042 defm VPERMPDZ : avx512_perm<0x16, "vpermpd", VR512, loadv8f64, f512mem,
1043 v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
1045 // -- VPERM2I - 3 source operands form --
// avx512_perm_3src: three-source permutes (vpermi2* / vpermt2*). $src1 is
// tied to $dst; rrk/rmk merge into the destination under the mask while
// rrkz/rmkz zero the masked-off lanes.
// NOTE(review): several pattern-body lines are elided from this listing.
1046 multiclass avx512_perm_3src<bits<8> opc, string OpcodeStr, RegisterClass RC,
1047 PatFrag mem_frag, X86MemOperand x86memop,
1048 SDNode OpNode, ValueType OpVT, RegisterClass KRC> {
1049 let Constraints = "$src1 = $dst" in {
1050 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
1051 (ins RC:$src1, RC:$src2, RC:$src3),
1052 !strconcat(OpcodeStr,
1053 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
1055 (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>,
1058 def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
1059 (ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
1060 !strconcat(OpcodeStr,
1061 "\t{$src3, $src2, $dst {${mask}}|"
1062 "$dst {${mask}}, $src2, $src3}"),
1063 [(set RC:$dst, (OpVT (vselect KRC:$mask,
1064 (OpNode RC:$src1, RC:$src2,
1069 let AddedComplexity = 30 in // Prefer over VMOV*rrkz Pat<>
1070 def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
1071 (ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
1072 !strconcat(OpcodeStr,
1073 "\t{$src3, $src2, $dst {${mask}} {z} |",
1074 "$dst {${mask}} {z}, $src2, $src3}"),
1075 [(set RC:$dst, (OpVT (vselect KRC:$mask,
1076 (OpNode RC:$src1, RC:$src2,
1079 (v16i32 immAllZerosV))))))]>,
1082 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
1083 (ins RC:$src1, RC:$src2, x86memop:$src3),
1084 !strconcat(OpcodeStr,
1085 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
1087 (OpVT (OpNode RC:$src1, RC:$src2,
1088 (mem_frag addr:$src3))))]>, EVEX_4V;
1090 def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
1091 (ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
1092 !strconcat(OpcodeStr,
1093 "\t{$src3, $src2, $dst {${mask}}|"
1094 "$dst {${mask}}, $src2, $src3}"),
1096 (OpVT (vselect KRC:$mask,
1097 (OpNode RC:$src1, RC:$src2,
1098 (mem_frag addr:$src3)),
1102 let AddedComplexity = 10 in // Prefer over the rrkz variant
1103 def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
1104 (ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
1105 !strconcat(OpcodeStr,
1106 "\t{$src3, $src2, $dst {${mask}} {z}|"
1107 "$dst {${mask}} {z}, $src2, $src3}"),
1109 (OpVT (vselect KRC:$mask,
1110 (OpNode RC:$src1, RC:$src2,
1111 (mem_frag addr:$src3)),
1113 (v16i32 immAllZerosV))))))]>,
// VPERMI2* instantiations (index operand overwritten, node X86VPermiv3).
// D/PS share opcode 0x76/0x77 with Q/PD; VEX_W selects 64-bit elements.
1117 defm VPERMI2D : avx512_perm_3src<0x76, "vpermi2d", VR512, loadv16i32,
1118 i512mem, X86VPermiv3, v16i32, VK16WM>,
1119 EVEX_V512, EVEX_CD8<32, CD8VF>;
1120 defm VPERMI2Q : avx512_perm_3src<0x76, "vpermi2q", VR512, loadv8i64,
1121 i512mem, X86VPermiv3, v8i64, VK8WM>,
1122 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
1123 defm VPERMI2PS : avx512_perm_3src<0x77, "vpermi2ps", VR512, loadv16f32,
1124 i512mem, X86VPermiv3, v16f32, VK16WM>,
1125 EVEX_V512, EVEX_CD8<32, CD8VF>;
1126 defm VPERMI2PD : avx512_perm_3src<0x77, "vpermi2pd", VR512, loadv8f64,
1127 i512mem, X86VPermiv3, v8f64, VK8WM>,
1128 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// avx512_perm_table_3src: vpermt2* built on avx512_perm_3src, plus patterns
// mapping the masked vpermt intrinsics; a mask of -1 selects the unmasked rr
// form, otherwise the GPR mask is copied into KRC and rrk is used.
1130 multiclass avx512_perm_table_3src<bits<8> opc, string Suffix, RegisterClass RC,
1131 PatFrag mem_frag, X86MemOperand x86memop,
1132 SDNode OpNode, ValueType OpVT, RegisterClass KRC,
1133 ValueType MaskVT, RegisterClass MRC> :
1134 avx512_perm_3src<opc, "vpermt2"##Suffix, RC, mem_frag, x86memop, OpNode,
1136 def : Pat<(OpVT (!cast<Intrinsic>("int_x86_avx512_mask_vpermt_"##Suffix##"_512")
1137 VR512:$idx, VR512:$src1, VR512:$src2, -1)),
1138 (!cast<Instruction>(NAME#rr) VR512:$src1, VR512:$idx, VR512:$src2)>;
1140 def : Pat<(OpVT (!cast<Intrinsic>("int_x86_avx512_mask_vpermt_"##Suffix##"_512")
1141 VR512:$idx, VR512:$src1, VR512:$src2, MRC:$mask)),
1142 (!cast<Instruction>(NAME#rrk) VR512:$src1,
1143 (MaskVT (COPY_TO_REGCLASS MRC:$mask, KRC)), VR512:$idx, VR512:$src2)>;
// VPERMT2{D,Q,PS,PD}: same opcode pairing as VPERMI2, VEX_W for 64-bit.
1146 defm VPERMT2D : avx512_perm_table_3src<0x7E, "d", VR512, loadv16i32, i512mem,
1147 X86VPermv3, v16i32, VK16WM, v16i1, GR16>,
1148 EVEX_V512, EVEX_CD8<32, CD8VF>;
1149 defm VPERMT2Q : avx512_perm_table_3src<0x7E, "q", VR512, loadv8i64, i512mem,
1150 X86VPermv3, v8i64, VK8WM, v8i1, GR8>,
1151 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
1152 defm VPERMT2PS : avx512_perm_table_3src<0x7F, "ps", VR512, loadv16f32, i512mem,
1153 X86VPermv3, v16f32, VK16WM, v16i1, GR16>,
1154 EVEX_V512, EVEX_CD8<32, CD8VF>;
1155 defm VPERMT2PD : avx512_perm_table_3src<0x7F, "pd", VR512, loadv8f64, i512mem,
1156 X86VPermv3, v8f64, VK8WM, v8i1, GR8>,
1157 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
1159 //===----------------------------------------------------------------------===//
1160 // AVX-512 - BLEND using mask
// avx512_blendmask: vblendm* / vpblendm*. Only the merge-masked forms
// (rrk, rmk) carry X86select patterns; the unmasked and zero-masked variants
// are encoding-only here (empty pattern lists).
1162 multiclass avx512_blendmask<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
1163 let ExeDomain = _.ExeDomain in {
1164 def rr : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
1165 (ins _.RC:$src1, _.RC:$src2),
1166 !strconcat(OpcodeStr,
1167 "\t{$src2, $src1, ${dst} |${dst}, $src1, $src2}"),
1169 def rrk : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
1170 (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
1171 !strconcat(OpcodeStr,
1172 "\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
1173 [(set _.RC:$dst, (X86select _.KRCWM:$mask, (_.VT _.RC:$src1),
1174 (_.VT _.RC:$src2)))]>, EVEX_4V, EVEX_K;
1175 def rrkz : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
1176 (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
1177 !strconcat(OpcodeStr,
1178 "\t{$src2, $src1, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src1, $src2}"),
1179 []>, EVEX_4V, EVEX_KZ;
1180 let mayLoad = 1 in {
1181 def rm : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
1182 (ins _.RC:$src1, _.MemOp:$src2),
1183 !strconcat(OpcodeStr,
1184 "\t{$src2, $src1, ${dst} |${dst}, $src1, $src2}"),
1185 []>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
1186 def rmk : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
1187 (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
1188 !strconcat(OpcodeStr,
1189 "\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
1190 [(set _.RC:$dst, (X86select _.KRCWM:$mask, (_.VT _.RC:$src1),
1191 (_.VT (bitconvert (_.LdFrag addr:$src2)))))]>,
1192 EVEX_4V, EVEX_K, EVEX_CD8<_.EltSize, CD8VF>;
1193 def rmkz : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
1194 (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
1195 !strconcat(OpcodeStr,
1196 "\t{$src2, $src1, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src1, $src2}"),
1197 []>, EVEX_4V, EVEX_KZ, EVEX_CD8<_.EltSize, CD8VF>;
// avx512_blendmask_rmb: broadcast-memory ({1toN}) blend forms. Only the
// masked rmbk carries a selection pattern; rmb is encoding-only.
1201 multiclass avx512_blendmask_rmb<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
1203 def rmbk : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
1204 (ins _.KRCWM:$mask, _.RC:$src1, _.ScalarMemOp:$src2),
1205 !strconcat(OpcodeStr,
1206 "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
1207 "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
1208 [(set _.RC:$dst,(X86select _.KRCWM:$mask, (_.VT _.RC:$src1),
1209 (X86VBroadcast (_.ScalarLdFrag addr:$src2))))]>,
1210 EVEX_4V, EVEX_K, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
1212 def rmb : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
1213 (ins _.RC:$src1, _.ScalarMemOp:$src2),
1214 !strconcat(OpcodeStr,
1215 "\t{${src2}", _.BroadcastStr, ", $src1, $dst|",
1216 "$dst, $src1, ${src2}", _.BroadcastStr, "}"),
1217 []>, EVEX_4V, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
// blendmask_dq: dword/qword-element blends - 512-bit always, 256/128-bit
// under VLX; every width also gets the broadcast-memory (rmb) forms.
1221 multiclass blendmask_dq <bits<8> opc, string OpcodeStr,
1222 AVX512VLVectorVTInfo VTInfo> {
1223 defm Z : avx512_blendmask <opc, OpcodeStr, VTInfo.info512>,
1224 avx512_blendmask_rmb <opc, OpcodeStr, VTInfo.info512>, EVEX_V512;
1226 let Predicates = [HasVLX] in {
1227 defm Z256 : avx512_blendmask<opc, OpcodeStr, VTInfo.info256>,
1228 avx512_blendmask_rmb <opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
1229 defm Z128 : avx512_blendmask<opc, OpcodeStr, VTInfo.info128>,
1230 avx512_blendmask_rmb <opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
// blendmask_bw: byte/word blends require BWI; the broadcast-memory forms are
// deliberately not instantiated for these element sizes.
1234 multiclass blendmask_bw <bits<8> opc, string OpcodeStr,
1235 AVX512VLVectorVTInfo VTInfo> {
1236 let Predicates = [HasBWI] in
1237 defm Z : avx512_blendmask <opc, OpcodeStr, VTInfo.info512>, EVEX_V512;
1239 let Predicates = [HasBWI, HasVLX] in {
1240 defm Z256 : avx512_blendmask <opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
1241 defm Z128 : avx512_blendmask <opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
1246 defm VBLENDMPS : blendmask_dq <0x65, "vblendmps", avx512vl_f32_info>;
1247 defm VBLENDMPD : blendmask_dq <0x65, "vblendmpd", avx512vl_f64_info>, VEX_W;
1248 defm VPBLENDMD : blendmask_dq <0x64, "vpblendmd", avx512vl_i32_info>;
1249 defm VPBLENDMQ : blendmask_dq <0x64, "vpblendmq", avx512vl_i64_info>, VEX_W;
1250 defm VPBLENDMB : blendmask_bw <0x66, "vpblendmb", avx512vl_i8_info>;
1251 defm VPBLENDMW : blendmask_bw <0x66, "vpblendmw", avx512vl_i16_info>, VEX_W;
// Without VLX, 256-bit vselects are handled by widening both operands to zmm
// (SUBREG_TO_REG), blending at 512 bits, then extracting the ymm result.
1254 let Predicates = [HasAVX512] in {
1255 def : Pat<(v8f32 (vselect (v8i1 VK8WM:$mask), (v8f32 VR256X:$src1),
1256 (v8f32 VR256X:$src2))),
1258 (v16f32 (VBLENDMPSZrrk (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
1259 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
1260 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
1262 def : Pat<(v8i32 (vselect (v8i1 VK8WM:$mask), (v8i32 VR256X:$src1),
1263 (v8i32 VR256X:$src2))),
1265 (v16i32 (VPBLENDMDZrrk (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
1266 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
1267 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
1269 //===----------------------------------------------------------------------===//
1270 // Compare Instructions
1271 //===----------------------------------------------------------------------===//
1273 // avx512_cmp_scalar - AVX512 CMPSS and CMPSD
// avx512_cmp_scalar: vcmpss / vcmpsd producing a 1-bit mask (VK1). rr/rm
// fold the condition code into the mnemonic (vcmp${cc}); the *_alt forms
// accept an explicit u8 immediate and exist only for the asm parser
// (isAsmParserOnly, no patterns, no side effects).
1274 multiclass avx512_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
1275 SDNode OpNode, ValueType VT,
1276 PatFrag ld_frag, string Suffix> {
1277 def rr : AVX512Ii8<0xC2, MRMSrcReg,
1278 (outs VK1:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
1279 !strconcat("vcmp${cc}", Suffix,
1280 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1281 [(set VK1:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))],
1282 IIC_SSE_ALU_F32S_RR>, EVEX_4V;
1283 def rm : AVX512Ii8<0xC2, MRMSrcMem,
1284 (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, AVXCC:$cc),
1285 !strconcat("vcmp${cc}", Suffix,
1286 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1287 [(set VK1:$dst, (OpNode (VT RC:$src1),
1288 (ld_frag addr:$src2), imm:$cc))], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
1289 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1290 def rri_alt : AVX512Ii8<0xC2, MRMSrcReg,
1291 (outs VK1:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc),
1292 !strconcat("vcmp", Suffix,
1293 "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
1294 [], IIC_SSE_ALU_F32S_RR>, EVEX_4V;
1296 def rmi_alt : AVX512Ii8<0xC2, MRMSrcMem,
1297 (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc),
1298 !strconcat("vcmp", Suffix,
1299 "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
1300 [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
// Scalar f32/f64 instantiations (trailing modifiers are on elided lines).
1304 let Predicates = [HasAVX512] in {
1305 defm VCMPSSZ : avx512_cmp_scalar<FR32X, f32mem, X86cmpms, f32, loadf32, "ss">,
1307 defm VCMPSDZ : avx512_cmp_scalar<FR64X, f64mem, X86cmpms, f64, loadf64, "sd">,
// avx512_icmp_packed: packed integer compares producing a mask register
// (vpcmpeq*/vpcmpgt*). The masked forms (rrk/rmk) AND the incoming writemask
// with the compare result.
1311 multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
1312 X86VectorVTInfo _> {
1313 def rr : AVX512BI<opc, MRMSrcReg,
1314 (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2),
1315 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1316 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2)))],
1317 IIC_SSE_ALU_F32P_RR>, EVEX_4V;
1319 def rm : AVX512BI<opc, MRMSrcMem,
1320 (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2),
1321 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1322 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
1323 (_.VT (bitconvert (_.LdFrag addr:$src2)))))],
1324 IIC_SSE_ALU_F32P_RM>, EVEX_4V;
1325 def rrk : AVX512BI<opc, MRMSrcReg,
1326 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
1327 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}}|",
1328 "$dst {${mask}}, $src1, $src2}"),
1329 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1330 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))))],
1331 IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
1333 def rmk : AVX512BI<opc, MRMSrcMem,
1334 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
1335 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}}|",
1336 "$dst {${mask}}, $src1, $src2}"),
1337 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1338 (OpNode (_.VT _.RC:$src1),
1340 (_.LdFrag addr:$src2))))))],
1341 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
// avx512_icmp_packed_rmb: adds embedded-broadcast memory forms (rmb/rmbk) on
// top of avx512_icmp_packed; instantiated below only for 32/64-bit elements.
1344 multiclass avx512_icmp_packed_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
1345 X86VectorVTInfo _> :
1346 avx512_icmp_packed<opc, OpcodeStr, OpNode, _> {
1347 let mayLoad = 1 in {
1348 def rmb : AVX512BI<opc, MRMSrcMem,
1349 (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2),
1350 !strconcat(OpcodeStr, "\t{${src2}", _.BroadcastStr, ", $src1, $dst",
1351 "|$dst, $src1, ${src2}", _.BroadcastStr, "}"),
1352 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
1353 (X86VBroadcast (_.ScalarLdFrag addr:$src2))))],
1354 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
1355 def rmbk : AVX512BI<opc, MRMSrcMem,
1356 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
1357 _.ScalarMemOp:$src2),
1358 !strconcat(OpcodeStr,
1359 "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
1360 "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
1361 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1362 (OpNode (_.VT _.RC:$src1),
1364 (_.ScalarLdFrag addr:$src2)))))],
1365 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
// Width wrappers: 512-bit under `prd`, 256/128-bit additionally require VLX.
1369 multiclass avx512_icmp_packed_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
1370 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
1371 let Predicates = [prd] in
1372 defm Z : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info512>,
1375 let Predicates = [prd, HasVLX] in {
1376 defm Z256 : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info256>,
1378 defm Z128 : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info128>,
// Same wrapper for the broadcast-capable (rmb) variant.
1383 multiclass avx512_icmp_packed_rmb_vl<bits<8> opc, string OpcodeStr,
1384 SDNode OpNode, AVX512VLVectorVTInfo VTInfo,
1386 let Predicates = [prd] in
1387 defm Z : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info512>,
1390 let Predicates = [prd, HasVLX] in {
1391 defm Z256 : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info256>,
1393 defm Z128 : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info128>,
// VPCMPEQ* / VPCMPGT* instantiations: byte/word forms require BWI; dword and
// qword forms get the embedded-broadcast variants.
1398 defm VPCMPEQB : avx512_icmp_packed_vl<0x74, "vpcmpeqb", X86pcmpeqm,
1399 avx512vl_i8_info, HasBWI>,
1402 defm VPCMPEQW : avx512_icmp_packed_vl<0x75, "vpcmpeqw", X86pcmpeqm,
1403 avx512vl_i16_info, HasBWI>,
1404 EVEX_CD8<16, CD8VF>;
1406 defm VPCMPEQD : avx512_icmp_packed_rmb_vl<0x76, "vpcmpeqd", X86pcmpeqm,
1407 avx512vl_i32_info, HasAVX512>,
1408 EVEX_CD8<32, CD8VF>;
1410 defm VPCMPEQQ : avx512_icmp_packed_rmb_vl<0x29, "vpcmpeqq", X86pcmpeqm,
1411 avx512vl_i64_info, HasAVX512>,
1412 T8PD, VEX_W, EVEX_CD8<64, CD8VF>;
1414 defm VPCMPGTB : avx512_icmp_packed_vl<0x64, "vpcmpgtb", X86pcmpgtm,
1415 avx512vl_i8_info, HasBWI>,
1418 defm VPCMPGTW : avx512_icmp_packed_vl<0x65, "vpcmpgtw", X86pcmpgtm,
1419 avx512vl_i16_info, HasBWI>,
1420 EVEX_CD8<16, CD8VF>;
1422 defm VPCMPGTD : avx512_icmp_packed_rmb_vl<0x66, "vpcmpgtd", X86pcmpgtm,
1423 avx512vl_i32_info, HasAVX512>,
1424 EVEX_CD8<32, CD8VF>;
1426 defm VPCMPGTQ : avx512_icmp_packed_rmb_vl<0x37, "vpcmpgtq", X86pcmpgtm,
1427 avx512vl_i64_info, HasAVX512>,
1428 T8PD, VEX_W, EVEX_CD8<64, CD8VF>;
// v8i32 compares without VLX: widen both operands to zmm, compare at 512 bits
// and keep only the low 8 mask bits (COPY_TO_REGCLASS to VK8).
1430 def : Pat<(v8i1 (X86pcmpgtm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
1431 (COPY_TO_REGCLASS (VPCMPGTDZrr
1432 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
1433 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
1435 def : Pat<(v8i1 (X86pcmpeqm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
1436 (COPY_TO_REGCLASS (VPCMPEQDZrr
1437 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
1438 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
// avx512_icmp_cc: vpcmp{cc}[b/w/d/q] with the comparison predicate
// (AVX512ICC operand) folded into the mnemonic. rrik/rmik AND the incoming
// writemask with the result; the *_alt defs accept a raw u8 immediate and
// exist only for the asm parser (no patterns, no side effects).
1440 multiclass avx512_icmp_cc<bits<8> opc, string Suffix, SDNode OpNode,
1441 X86VectorVTInfo _> {
1442 def rri : AVX512AIi8<opc, MRMSrcReg,
1443 (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, AVX512ICC:$cc),
1444 !strconcat("vpcmp${cc}", Suffix,
1445 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1446 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
1448 IIC_SSE_ALU_F32P_RR>, EVEX_4V;
1450 def rmi : AVX512AIi8<opc, MRMSrcMem,
1451 (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, AVX512ICC:$cc),
1452 !strconcat("vpcmp${cc}", Suffix,
1453 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1454 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
1455 (_.VT (bitconvert (_.LdFrag addr:$src2))),
1457 IIC_SSE_ALU_F32P_RM>, EVEX_4V;
1458 def rrik : AVX512AIi8<opc, MRMSrcReg,
1459 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2,
1461 !strconcat("vpcmp${cc}", Suffix,
1462 "\t{$src2, $src1, $dst {${mask}}|",
1463 "$dst {${mask}}, $src1, $src2}"),
1464 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1465 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
1467 IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
1469 def rmik : AVX512AIi8<opc, MRMSrcMem,
1470 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
1472 !strconcat("vpcmp${cc}", Suffix,
1473 "\t{$src2, $src1, $dst {${mask}}|",
1474 "$dst {${mask}}, $src1, $src2}"),
1475 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1476 (OpNode (_.VT _.RC:$src1),
1477 (_.VT (bitconvert (_.LdFrag addr:$src2))),
1479 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
1481 // Accept explicit immediate argument form instead of comparison code.
1482 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1483 def rri_alt : AVX512AIi8<opc, MRMSrcReg,
1484 (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
1485 !strconcat("vpcmp", Suffix, "\t{$cc, $src2, $src1, $dst|",
1486 "$dst, $src1, $src2, $cc}"),
1487 [], IIC_SSE_ALU_F32P_RR>, EVEX_4V;
1489 def rmi_alt : AVX512AIi8<opc, MRMSrcMem,
1490 (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, u8imm:$cc),
1491 !strconcat("vpcmp", Suffix, "\t{$cc, $src2, $src1, $dst|",
1492 "$dst, $src1, $src2, $cc}"),
1493 [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
1494 def rrik_alt : AVX512AIi8<opc, MRMSrcReg,
1495 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2,
1497 !strconcat("vpcmp", Suffix,
1498 "\t{$cc, $src2, $src1, $dst {${mask}}|",
1499 "$dst {${mask}}, $src1, $src2, $cc}"),
1500 [], IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
1502 def rmik_alt : AVX512AIi8<opc, MRMSrcMem,
1503 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
1505 !strconcat("vpcmp", Suffix,
1506 "\t{$cc, $src2, $src1, $dst {${mask}}|",
1507 "$dst {${mask}}, $src1, $src2, $cc}"),
1508 [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
// avx512_icmp_cc_rmb: adds embedded-broadcast forms (rmib/rmibk) and their
// asm-parser-only explicit-immediate variants on top of avx512_icmp_cc.
1512 multiclass avx512_icmp_cc_rmb<bits<8> opc, string Suffix, SDNode OpNode,
1513 X86VectorVTInfo _> :
1514 avx512_icmp_cc<opc, Suffix, OpNode, _> {
1515 def rmib : AVX512AIi8<opc, MRMSrcMem,
1516 (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2,
1518 !strconcat("vpcmp${cc}", Suffix,
1519 "\t{${src2}", _.BroadcastStr, ", $src1, $dst|",
1520 "$dst, $src1, ${src2}", _.BroadcastStr, "}"),
1521 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
1522 (X86VBroadcast (_.ScalarLdFrag addr:$src2)),
1524 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
1525 def rmibk : AVX512AIi8<opc, MRMSrcMem,
1526 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
1527 _.ScalarMemOp:$src2, AVX512ICC:$cc),
1528 !strconcat("vpcmp${cc}", Suffix,
1529 "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
1530 "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
1531 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1532 (OpNode (_.VT _.RC:$src1),
1533 (X86VBroadcast (_.ScalarLdFrag addr:$src2)),
1535 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
1537 // Accept explicit immediate argument form instead of comparison code.
1538 let isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 1 in {
1539 def rmib_alt : AVX512AIi8<opc, MRMSrcMem,
1540 (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2,
1542 !strconcat("vpcmp", Suffix,
1543 "\t{$cc, ${src2}", _.BroadcastStr, ", $src1, $dst|",
1544 "$dst, $src1, ${src2}", _.BroadcastStr, ", $cc}"),
1545 [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
1546 def rmibk_alt : AVX512AIi8<opc, MRMSrcMem,
1547 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
1548 _.ScalarMemOp:$src2, u8imm:$cc),
1549 !strconcat("vpcmp", Suffix,
1550 "\t{$cc, ${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
1551 "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, ", $cc}"),
1552 [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
// Width wrappers and VPCMP[U]{B,W,D,Q} instantiations. Signed compares use
// X86cmpm (opcode 0x3F/0x1F), unsigned use X86cmpmu (0x3E/0x1E); byte/word
// forms require BWI, dword/qword forms get embedded-broadcast variants.
1556 multiclass avx512_icmp_cc_vl<bits<8> opc, string Suffix, SDNode OpNode,
1557 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
1558 let Predicates = [prd] in
1559 defm Z : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info512>, EVEX_V512;
1561 let Predicates = [prd, HasVLX] in {
1562 defm Z256 : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info256>, EVEX_V256;
1563 defm Z128 : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info128>, EVEX_V128;
1567 multiclass avx512_icmp_cc_rmb_vl<bits<8> opc, string Suffix, SDNode OpNode,
1568 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
1569 let Predicates = [prd] in
1570 defm Z : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info512>,
1573 let Predicates = [prd, HasVLX] in {
1574 defm Z256 : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info256>,
1576 defm Z128 : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info128>,
1581 defm VPCMPB : avx512_icmp_cc_vl<0x3F, "b", X86cmpm, avx512vl_i8_info,
1582 HasBWI>, EVEX_CD8<8, CD8VF>;
1583 defm VPCMPUB : avx512_icmp_cc_vl<0x3E, "ub", X86cmpmu, avx512vl_i8_info,
1584 HasBWI>, EVEX_CD8<8, CD8VF>;
1586 defm VPCMPW : avx512_icmp_cc_vl<0x3F, "w", X86cmpm, avx512vl_i16_info,
1587 HasBWI>, VEX_W, EVEX_CD8<16, CD8VF>;
1588 defm VPCMPUW : avx512_icmp_cc_vl<0x3E, "uw", X86cmpmu, avx512vl_i16_info,
1589 HasBWI>, VEX_W, EVEX_CD8<16, CD8VF>;
1591 defm VPCMPD : avx512_icmp_cc_rmb_vl<0x1F, "d", X86cmpm, avx512vl_i32_info,
1592 HasAVX512>, EVEX_CD8<32, CD8VF>;
1593 defm VPCMPUD : avx512_icmp_cc_rmb_vl<0x1E, "ud", X86cmpmu, avx512vl_i32_info,
1594 HasAVX512>, EVEX_CD8<32, CD8VF>;
1596 defm VPCMPQ : avx512_icmp_cc_rmb_vl<0x1F, "q", X86cmpm, avx512vl_i64_info,
1597 HasAVX512>, VEX_W, EVEX_CD8<64, CD8VF>;
1598 defm VPCMPUQ : avx512_icmp_cc_rmb_vl<0x1E, "uq", X86cmpmu, avx512vl_i64_info,
1599 HasAVX512>, VEX_W, EVEX_CD8<64, CD8VF>;
// Floating-point VCMP (opcode 0xC2) forms shared by all vector widths:
// reg-reg (rri), reg-mem (rmi), and reg-broadcast-mem (rmbi), each selecting
// via X86cmpm into a mask register.
1601 multiclass avx512_vcmp_common<X86VectorVTInfo _> {
1603 defm rri : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
1604 (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2,AVXCC:$cc),
1605 "vcmp${cc}"#_.Suffix,
1606 "$src2, $src1", "$src1, $src2",
1607 (X86cmpm (_.VT _.RC:$src1),
1611 let mayLoad = 1 in {
1612 defm rmi : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
1613 (outs _.KRC:$dst),(ins _.RC:$src1, _.MemOp:$src2, AVXCC:$cc),
1614 "vcmp${cc}"#_.Suffix,
1615 "$src2, $src1", "$src1, $src2",
1616 (X86cmpm (_.VT _.RC:$src1),
1617 (_.VT (bitconvert (_.LdFrag addr:$src2))),
1620 defm rmbi : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
1622 (ins _.RC:$src1, _.ScalarMemOp:$src2, AVXCC:$cc),
1623 "vcmp${cc}"#_.Suffix,
1624 "${src2}"##_.BroadcastStr##", $src1",
1625 "$src1, ${src2}"##_.BroadcastStr,
1626 (X86cmpm (_.VT _.RC:$src1),
1627 (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
1630 // Accept explicit immediate argument form instead of comparison code.
// Parser-only (_alt) variants: raw u8imm $cc instead of the AVXCC
// pseudo-operand; no patterns, so codegen never selects them.
1631 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1632 defm rri_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcReg, _,
1634 (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
1636 "$cc, $src2, $src1", "$src1, $src2, $cc">;
1638 let mayLoad = 1 in {
1639 defm rmi_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcMem, _,
1641 (ins _.RC:$src1, _.MemOp:$src2, u8imm:$cc),
1643 "$cc, $src2, $src1", "$src1, $src2, $cc">;
1645 defm rmbi_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcMem, _,
1647 (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$cc),
1649 "$cc, ${src2}"##_.BroadcastStr##", $src1",
1650 "$src1, ${src2}"##_.BroadcastStr##", $cc">,EVEX_B;
// Suppress-all-exceptions ({sae}) form of VCMP; 512-bit only (see
// avx512_vcmp below, which instantiates it only for the Z variant).
1655 multiclass avx512_vcmp_sae<X86VectorVTInfo _> {
1656 // comparison code form (VCMP[EQ/LT/LE/...]
1657 defm rrib : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
1658 (outs _.KRC:$dst),(ins _.RC:$src1, _.RC:$src2, AVXCC:$cc),
1659 "vcmp${cc}"#_.Suffix,
1660 "{sae}, $src2, $src1", "$src1, $src2,{sae}",
1661 (X86cmpmRnd (_.VT _.RC:$src1),
1664 (i32 FROUND_NO_EXC))>, EVEX_B;
1666 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1667 defm rrib_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcReg, _,
1669 (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
1671 "$cc,{sae}, $src2, $src1",
1672 "$src1, $src2,{sae}, $cc">, EVEX_B;
// Instantiate VCMP at all widths; the {sae} form only exists at 512 bits.
1676 multiclass avx512_vcmp<AVX512VLVectorVTInfo _> {
1677 let Predicates = [HasAVX512] in {
1678 defm Z : avx512_vcmp_common<_.info512>,
1679 avx512_vcmp_sae<_.info512>, EVEX_V512;
1682 let Predicates = [HasAVX512,HasVLX] in {
1683 defm Z128 : avx512_vcmp_common<_.info128>, EVEX_V128;
1684 defm Z256 : avx512_vcmp_common<_.info256>, EVEX_V256;
1688 defm VCMPPD : avx512_vcmp<avx512vl_f64_info>,
1689 AVX512PDIi8Base, EVEX_4V, EVEX_CD8<64, CD8VF>, VEX_W;
1690 defm VCMPPS : avx512_vcmp<avx512vl_f32_info>,
1691 AVX512PSIi8Base, EVEX_4V, EVEX_CD8<32, CD8VF>;
// Without VLX, lower 256-bit compares by widening the operands to 512 bits
// (upper lanes are undef via SUBREG_TO_REG) and using the Z-form compare.
1693 def : Pat<(v8i1 (X86cmpm (v8f32 VR256X:$src1), (v8f32 VR256X:$src2), imm:$cc)),
1694 (COPY_TO_REGCLASS (VCMPPSZrri
1695 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
1696 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
1698 def : Pat<(v8i1 (X86cmpm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
1699 (COPY_TO_REGCLASS (VPCMPDZrri
1700 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
1701 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
1703 def : Pat<(v8i1 (X86cmpmu (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
1704 (COPY_TO_REGCLASS (VPCMPUDZrri
1705 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
1706 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
1709 //-----------------------------------------------------------------
1710 // Mask register copy, including
1711 // - copy between mask registers
1712 // - load/store mask registers
1713 // - copy from GPR to mask register and vice versa
// kk = mask-to-mask move, km = load from memory, mk = store to memory.
// Only the memory forms carry patterns; reg-reg copies are handled by the
// register allocator, hence the empty pattern list.
1715 multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
1716 string OpcodeStr, RegisterClass KRC,
1717 ValueType vvt, X86MemOperand x86memop> {
1718 let hasSideEffects = 0 in {
1719 def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
1720 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
1722 def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
1723 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
1724 [(set KRC:$dst, (vvt (load addr:$src)))]>;
1726 def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
1727 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
1728 [(store KRC:$src, addr:$dst)]>;
// kr = GPR-to-mask, rk = mask-to-GPR. Pattern-less; selected via the
// bitconvert patterns below.
1732 multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
1734 RegisterClass KRC, RegisterClass GRC> {
1735 let hasSideEffects = 0 in {
1736 def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src),
1737 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
1738 def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src),
1739 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
1743 let Predicates = [HasDQI] in
1744 defm KMOVB : avx512_mask_mov<0x90, 0x90, 0x91, "kmovb", VK8, v8i1, i8mem>,
1745 avx512_mask_mov_gpr<0x92, 0x93, "kmovb", VK8, GR32>,
1748 let Predicates = [HasAVX512] in
1749 defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem>,
1750 avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
1753 let Predicates = [HasBWI] in {
1754 defm KMOVD : avx512_mask_mov<0x90, 0x90, 0x91, "kmovd", VK32, v32i1,i32mem>,
1756 defm KMOVD : avx512_mask_mov_gpr<0x92, 0x93, "kmovd", VK32, GR32>,
// NOTE(review): KMOVQ's GR64 forms are gated only on HasBWI here; the GR64
// encodings are 64-bit-mode-only on real hardware — confirm whether an
// In64BitMode predicate is needed.
1760 let Predicates = [HasBWI] in {
1761 defm KMOVQ : avx512_mask_mov<0x90, 0x90, 0x91, "kmovq", VK64, v64i1, i64mem>,
1763 defm KMOVQ : avx512_mask_mov_gpr<0x92, 0x93, "kmovq", VK64, GR64>,
1767 // GR from/to mask register
// i8/i16 GPRs are first widened to 32 bits (SUBREG_TO_REG) because the
// KMOV GPR forms defined above operate on GR32.
1768 let Predicates = [HasDQI] in {
1769 def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
1770 (KMOVBkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit))>;
1771 def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
1772 (EXTRACT_SUBREG (KMOVBrk VK8:$src), sub_8bit)>;
1774 let Predicates = [HasAVX512] in {
1775 def : Pat<(v16i1 (bitconvert (i16 GR16:$src))),
1776 (KMOVWkr (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit))>;
1777 def : Pat<(i16 (bitconvert (v16i1 VK16:$src))),
1778 (EXTRACT_SUBREG (KMOVWrk VK16:$src), sub_16bit)>;
1780 let Predicates = [HasBWI] in {
1781 def : Pat<(v32i1 (bitconvert (i32 GR32:$src))), (KMOVDkr GR32:$src)>;
1782 def : Pat<(i32 (bitconvert (v32i1 VK32:$src))), (KMOVDrk VK32:$src)>;
1784 let Predicates = [HasBWI] in {
1785 def : Pat<(v64i1 (bitconvert (i64 GR64:$src))), (KMOVQkr GR64:$src)>;
1786 def : Pat<(i64 (bitconvert (v64i1 VK64:$src))), (KMOVQrk VK64:$src)>;
// Mask load/store through memory. With DQI, v8i1 uses native KMOVB;
// without it, the mask is shuffled through VK16 and KMOVW.
1790 let Predicates = [HasDQI] in {
1791 def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst),
1792 (KMOVBmk addr:$dst, VK8:$src)>;
1793 def : Pat<(v8i1 (bitconvert (i8 (load addr:$src)))),
1794 (KMOVBkm addr:$src)>;
1796 let Predicates = [HasAVX512, NoDQI] in {
1797 def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst),
1798 (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK8:$src, VK16))>;
1799 def : Pat<(v8i1 (bitconvert (i8 (load addr:$src)))),
1800 (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK8)>;
1802 let Predicates = [HasAVX512] in {
1803 def : Pat<(store (i16 (bitconvert (v16i1 VK16:$src))), addr:$dst),
1804 (KMOVWmk addr:$dst, VK16:$src)>;
// NOTE(review): this loads 16 bits via KMOVW for an i1 load — relies on the
// extra bytes being ignorable; verify against the i1-load callers.
1805 def : Pat<(i1 (load addr:$src)),
1806 (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK1)>;
1807 def : Pat<(v16i1 (bitconvert (i16 (load addr:$src)))),
1808 (KMOVWkm addr:$src)>;
1810 let Predicates = [HasBWI] in {
1811 def : Pat<(store (i32 (bitconvert (v32i1 VK32:$src))), addr:$dst),
1812 (KMOVDmk addr:$dst, VK32:$src)>;
1813 def : Pat<(v32i1 (bitconvert (i32 (load addr:$src)))),
1814 (KMOVDkm addr:$src)>;
1816 let Predicates = [HasBWI] in {
1817 def : Pat<(store (i64 (bitconvert (v64i1 VK64:$src))), addr:$dst),
1818 (KMOVQmk addr:$dst, VK64:$src)>;
1819 def : Pat<(v64i1 (bitconvert (i64 (load addr:$src)))),
1820 (KMOVQkm addr:$src)>;
// i1 <-> GPR conversions. trunc-to-i1 masks the GPR with AND ...ri 1 before
// moving it into a mask register; zext reads the mask back through KMOVWrk
// and re-masks bit 0.
1823 let Predicates = [HasAVX512] in {
1824 def : Pat<(i1 (trunc (i64 GR64:$src))),
1825 (COPY_TO_REGCLASS (KMOVWkr (AND32ri (EXTRACT_SUBREG $src, sub_32bit),
1828 def : Pat<(i1 (trunc (i32 GR32:$src))),
1829 (COPY_TO_REGCLASS (KMOVWkr (AND32ri $src, (i32 1))), VK1)>;
1831 def : Pat<(i1 (trunc (i8 GR8:$src))),
1833 (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit), (i32 1))),
1835 def : Pat<(i1 (trunc (i16 GR16:$src))),
1837 (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), $src, sub_16bit), (i32 1))),
1840 def : Pat<(i32 (zext VK1:$src)),
1841 (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1))>;
1842 def : Pat<(i8 (zext VK1:$src)),
1845 (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)), sub_8bit)>;
1846 def : Pat<(i64 (zext VK1:$src)),
1847 (AND64ri8 (SUBREG_TO_REG (i64 0),
1848 (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), sub_32bit), (i64 1))>;
1849 def : Pat<(i16 (zext VK1:$src)),
1851 (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)),
// scalar_to_vector of an i1 is just a register-class change: the scalar
// already lives in bit 0 of a mask register.
1853 def : Pat<(v16i1 (scalar_to_vector VK1:$src)),
1854 (COPY_TO_REGCLASS VK1:$src, VK16)>;
1855 def : Pat<(v8i1 (scalar_to_vector VK1:$src)),
1856 (COPY_TO_REGCLASS VK1:$src, VK8)>;
1858 let Predicates = [HasBWI] in {
1859 def : Pat<(v32i1 (scalar_to_vector VK1:$src)),
1860 (COPY_TO_REGCLASS VK1:$src, VK32)>;
1861 def : Pat<(v64i1 (scalar_to_vector VK1:$src)),
1862 (COPY_TO_REGCLASS VK1:$src, VK64)>;
1866 // With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
1867 let Predicates = [HasAVX512, NoDQI] in {
1868 // GR from/to 8-bit mask without native support
1869 def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
1871 (KMOVWkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
1873 def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
1875 (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
// Extracting element 0 of a mask vector is a pure register-class change:
// bit 0 of the wider mask register is already the value.
1878 let Predicates = [HasAVX512] in {
1879 def : Pat<(i1 (X86Vextract VK16:$src, (iPTR 0))),
1880 (COPY_TO_REGCLASS VK16:$src, VK1)>;
1881 def : Pat<(i1 (X86Vextract VK8:$src, (iPTR 0))),
1882 (COPY_TO_REGCLASS VK8:$src, VK1)>;
1884 let Predicates = [HasBWI] in {
1885 def : Pat<(i1 (X86Vextract VK32:$src, (iPTR 0))),
1886 (COPY_TO_REGCLASS VK32:$src, VK1)>;
1887 def : Pat<(i1 (X86Vextract VK64:$src, (iPTR 0))),
1888 (COPY_TO_REGCLASS VK64:$src, VK1)>;
1891 // Mask unary operation
// One rr form per mask width; the B/D/Q widths require DQI/BWI as encoded
// in avx512_mask_unop_all below.
1893 multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr,
1894 RegisterClass KRC, SDPatternOperator OpNode,
1896 let Predicates = [prd] in
1897 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
1898 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
1899 [(set KRC:$dst, (OpNode KRC:$src))]>;
1902 multiclass avx512_mask_unop_all<bits<8> opc, string OpcodeStr,
1903 SDPatternOperator OpNode> {
1904 defm B : avx512_mask_unop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
1906 defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
1907 HasAVX512>, VEX, PS;
1908 defm D : avx512_mask_unop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
1909 HasBWI>, VEX, PD, VEX_W;
1910 defm Q : avx512_mask_unop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
1911 HasBWI>, VEX, PS, VEX_W;
1914 defm KNOT : avx512_mask_unop_all<0x44, "knot", not>;
// Map the knot intrinsics (which take/return GR16) onto the W-width
// instruction via VK16 copies.
1916 multiclass avx512_mask_unop_int<string IntName, string InstName> {
1917 let Predicates = [HasAVX512] in
1918 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
1920 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
1921 (v16i1 (COPY_TO_REGCLASS GR16:$src, VK16))), GR16)>;
1923 defm : avx512_mask_unop_int<"knot", "KNOT">;
// xor-with-all-ones is a NOT; select the width-appropriate KNOT.
1925 let Predicates = [HasDQI] in
1926 def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)), (KNOTBrr VK8:$src1)>;
1927 let Predicates = [HasAVX512] in
1928 def : Pat<(xor VK16:$src1, (v16i1 immAllOnesV)), (KNOTWrr VK16:$src1)>;
1929 let Predicates = [HasBWI] in
1930 def : Pat<(xor VK32:$src1, (v32i1 immAllOnesV)), (KNOTDrr VK32:$src1)>;
1931 let Predicates = [HasBWI] in
1932 def : Pat<(xor VK64:$src1, (v64i1 immAllOnesV)), (KNOTQrr VK64:$src1)>;
1934 // KNL does not support KMOVB, 8-bit mask is promoted to 16-bit
1935 let Predicates = [HasAVX512, NoDQI] in {
1936 def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)),
1937 (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$src1, VK16)), VK8)>;
1938 def : Pat<(not VK8:$src),
1940 (KNOTWrr (COPY_TO_REGCLASS VK8:$src, VK16)), VK8)>;
1942 def : Pat<(xor VK4:$src1, (v4i1 immAllOnesV)),
1943 (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK4:$src1, VK16)), VK4)>;
1944 def : Pat<(xor VK2:$src1, (v2i1 immAllOnesV)),
1945 (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK2:$src1, VK16)), VK2)>;
1947 // Mask binary operation
1948 // - KAND, KANDN, KOR, KXNOR, KXOR
1949 multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr,
1950 RegisterClass KRC, SDPatternOperator OpNode,
1951 Predicate prd, bit IsCommutable> {
1952 let Predicates = [prd], isCommutable = IsCommutable in
1953 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
1954 !strconcat(OpcodeStr,
1955 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1956 [(set KRC:$dst, (OpNode KRC:$src1, KRC:$src2))]>;
// Instantiate at all four mask widths; B needs DQI, D/Q need BWI.
1959 multiclass avx512_mask_binop_all<bits<8> opc, string OpcodeStr,
1960 SDPatternOperator OpNode, bit IsCommutable> {
1961 defm B : avx512_mask_binop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
1962 HasDQI, IsCommutable>, VEX_4V, VEX_L, PD;
1963 defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
1964 HasAVX512, IsCommutable>, VEX_4V, VEX_L, PS;
1965 defm D : avx512_mask_binop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
1966 HasBWI, IsCommutable>, VEX_4V, VEX_L, VEX_W, PD;
1967 defm Q : avx512_mask_binop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
1968 HasBWI, IsCommutable>, VEX_4V, VEX_L, VEX_W, PS;
// Pattern fragments for and-not and xnor so KANDN/KXNOR can be matched
// directly from the DAG.
1971 def andn : PatFrag<(ops node:$i0, node:$i1), (and (not node:$i0), node:$i1)>;
1972 def xnor : PatFrag<(ops node:$i0, node:$i1), (not (xor node:$i0, node:$i1))>;
1974 defm KAND : avx512_mask_binop_all<0x41, "kand", and, 1>;
1975 defm KOR : avx512_mask_binop_all<0x45, "kor", or, 1>;
1976 defm KXNOR : avx512_mask_binop_all<0x46, "kxnor", xnor, 1>;
1977 defm KXOR : avx512_mask_binop_all<0x47, "kxor", xor, 1>;
1978 defm KANDN : avx512_mask_binop_all<0x42, "kandn", andn, 0>;
// Map the GR16-based mask-binop intrinsics onto the W-width instructions.
1980 multiclass avx512_mask_binop_int<string IntName, string InstName> {
1981 let Predicates = [HasAVX512] in
1982 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
1983 (i16 GR16:$src1), (i16 GR16:$src2)),
1984 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
1985 (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
1986 (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
1989 defm : avx512_mask_binop_int<"kand", "KAND">;
1990 defm : avx512_mask_binop_int<"kandn", "KANDN">;
1991 defm : avx512_mask_binop_int<"kor", "KOR">;
1992 defm : avx512_mask_binop_int<"kxnor", "KXNOR">;
1993 defm : avx512_mask_binop_int<"kxor", "KXOR">;
// Lower mask binops on sub-16-bit mask types by widening both operands to
// VK16, running the W-width instruction, and narrowing the result back to
// the operand's own register class.
1995 multiclass avx512_binop_pat<SDPatternOperator OpNode, Instruction Inst> {
1996 // With AVX512F, 8-bit mask is promoted to 16-bit mask,
1997 // for the DQI set, this type is legal and KxxxB instruction is used
1998 let Predicates = [NoDQI] in
1999 def : Pat<(OpNode VK8:$src1, VK8:$src2),
2001 (Inst (COPY_TO_REGCLASS VK8:$src1, VK16),
2002 (COPY_TO_REGCLASS VK8:$src2, VK16)), VK8)>;
2004 // All types smaller than 8 bits require conversion anyway
2005 def : Pat<(OpNode VK1:$src1, VK1:$src2),
2006 (COPY_TO_REGCLASS (Inst
2007 (COPY_TO_REGCLASS VK1:$src1, VK16),
2008 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
// The result register class must match the pattern's result type: a v2i1
// result narrows to VK2 and a v4i1 result to VK4 (previously both copied
// to VK1, a copy-paste from the VK1 pattern above).
2009 def : Pat<(OpNode VK2:$src1, VK2:$src2),
2010 (COPY_TO_REGCLASS (Inst
2011 (COPY_TO_REGCLASS VK2:$src1, VK16),
2012 (COPY_TO_REGCLASS VK2:$src2, VK16)), VK2)>;
2013 def : Pat<(OpNode VK4:$src1, VK4:$src2),
2014 (COPY_TO_REGCLASS (Inst
2015 (COPY_TO_REGCLASS VK4:$src1, VK16),
2016 (COPY_TO_REGCLASS VK4:$src2, VK16)), VK4)>;
// Instantiate the widening patterns for each binop using the W-width
// (VK16) instruction.
2019 defm : avx512_binop_pat<and, KANDWrr>;
2020 defm : avx512_binop_pat<andn, KANDNWrr>;
2021 defm : avx512_binop_pat<or, KORWrr>;
2022 defm : avx512_binop_pat<xnor, KXNORWrr>;
2023 defm : avx512_binop_pat<xor, KXORWrr>;
// not(xor(a,b)) spelled as xor-with-all-ones also selects to KXNOR.
2025 def : Pat<(xor (xor VK16:$src1, VK16:$src2), (v16i1 immAllOnesV)),
2026 (KXNORWrr VK16:$src1, VK16:$src2)>;
2027 def : Pat<(xor (xor VK8:$src1, VK8:$src2), (v8i1 immAllOnesV)),
2028 (KXNORBrr VK8:$src1, VK8:$src2)>;
2029 def : Pat<(xor (xor VK32:$src1, VK32:$src2), (v32i1 immAllOnesV)),
2030 (KXNORDrr VK32:$src1, VK32:$src2)>;
2031 def : Pat<(xor (xor VK64:$src1, VK64:$src2), (v64i1 immAllOnesV)),
2032 (KXNORQrr VK64:$src1, VK64:$src2)>;
// Without DQI there is no KXNORB; widen v8i1 (and the sub-8-bit types)
// through VK16 as usual.
2034 let Predicates = [NoDQI] in
2035 def : Pat<(xor (xor VK8:$src1, VK8:$src2), (v8i1 immAllOnesV)),
2036 (COPY_TO_REGCLASS (KXNORWrr (COPY_TO_REGCLASS VK8:$src1, VK16),
2037 (COPY_TO_REGCLASS VK8:$src2, VK16)), VK8)>;
2039 def : Pat<(xor (xor VK4:$src1, VK4:$src2), (v4i1 immAllOnesV)),
2040 (COPY_TO_REGCLASS (KXNORWrr (COPY_TO_REGCLASS VK4:$src1, VK16),
2041 (COPY_TO_REGCLASS VK4:$src2, VK16)), VK4)>;
2043 def : Pat<(xor (xor VK2:$src1, VK2:$src2), (v2i1 immAllOnesV)),
2044 (COPY_TO_REGCLASS (KXNORWrr (COPY_TO_REGCLASS VK2:$src1, VK16),
2045 (COPY_TO_REGCLASS VK2:$src2, VK16)), VK2)>;
2047 def : Pat<(xor (xor VK1:$src1, VK1:$src2), (i1 1)),
2048 (COPY_TO_REGCLASS (KXNORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
2049 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
// Mask unpack (KUNPCKBW): pattern-less encoding-only def plus patterns.
2052 multiclass avx512_mask_unpck<bits<8> opc, string OpcodeStr,
2053 RegisterClass KRC> {
2054 let Predicates = [HasAVX512] in
2055 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
2056 !strconcat(OpcodeStr,
2057 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2060 multiclass avx512_mask_unpck_bw<bits<8> opc, string OpcodeStr> {
2061 defm BW : avx512_mask_unpck<opc, !strconcat(OpcodeStr, "bw"), VK16>,
2065 defm KUNPCK : avx512_mask_unpck_bw<0x4b, "kunpck">;
// concat_vectors puts its first operand in the low elements; KUNPCKBW's
// second source supplies the low byte, so the operands are swapped here.
2066 def : Pat<(v16i1 (concat_vectors (v8i1 VK8:$src1), (v8i1 VK8:$src2))),
2067 (KUNPCKBWrr (COPY_TO_REGCLASS VK8:$src2, VK16),
2068 (COPY_TO_REGCLASS VK8:$src1, VK16))>;
2071 multiclass avx512_mask_unpck_int<string IntName, string InstName> {
2072 let Predicates = [HasAVX512] in
2073 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_bw")
2074 (i16 GR16:$src1), (i16 GR16:$src2)),
2075 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"BWrr")
2076 (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
2077 (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
2079 defm : avx512_mask_unpck_int<"kunpck", "KUNPCK">;
// Mask test (KORTEST): compares two mask registers and sets EFLAGS; no
// register result, hence empty outs and the Defs = [EFLAGS].
2082 multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
2084 let Predicates = [HasAVX512], Defs = [EFLAGS] in
2085 def rr : I<opc, MRMSrcReg, (outs), (ins KRC:$src1, KRC:$src2),
2086 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2087 [(set EFLAGS, (OpNode KRC:$src1, KRC:$src2))]>;
2090 multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode> {
2091 defm W : avx512_mask_testop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
2093 let Predicates = [HasDQI] in
2094 defm B : avx512_mask_testop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode>,
2096 let Predicates = [HasBWI] in {
2097 defm Q : avx512_mask_testop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode>,
2099 defm D : avx512_mask_testop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode>,
2104 defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>;
// Mask shift by immediate (KSHIFTL/KSHIFTR). opc1 encodes the W/B forms,
// opc2 the D/Q forms.
2107 multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
2109 let Predicates = [HasAVX512] in
2110 def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, u8imm:$imm),
2111 !strconcat(OpcodeStr,
2112 "\t{$imm, $src, $dst|$dst, $src, $imm}"),
2113 [(set KRC:$dst, (OpNode KRC:$src, (i8 imm:$imm)))]>;
2116 multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
2118 defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
2120 let Predicates = [HasDQI] in
2121 defm B : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "b"), VK8, OpNode>,
2123 let Predicates = [HasBWI] in {
2124 defm Q : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "q"), VK64, OpNode>,
2126 let Predicates = [HasDQI] in
2127 defm D : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "d"), VK32, OpNode>,
2132 defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", X86vshli>;
2133 defm KSHIFTR : avx512_mask_shiftop_w<0x30, 0x31, "kshiftr", X86vsrli>;
2135 // Mask setting all 0s or 1s
// Pseudos expanded later; marked rematerializable so the value can be
// recreated instead of spilled.
2136 multiclass avx512_mask_setop<RegisterClass KRC, ValueType VT, PatFrag Val> {
2137 let Predicates = [HasAVX512] in
2138 let isReMaterializable = 1, isAsCheapAsAMove = 1, isPseudo = 1 in
2139 def #NAME# : I<0, Pseudo, (outs KRC:$dst), (ins), "",
2140 [(set KRC:$dst, (VT Val))]>;
2143 multiclass avx512_mask_setop_w<PatFrag Val> {
2144 defm B : avx512_mask_setop<VK8, v8i1, Val>;
2145 defm W : avx512_mask_setop<VK16, v16i1, Val>;
2146 defm D : avx512_mask_setop<VK32, v32i1, Val>;
2147 defm Q : avx512_mask_setop<VK64, v64i1, Val>;
2150 defm KSET0 : avx512_mask_setop_w<immAllZerosV>;
2151 defm KSET1 : avx512_mask_setop_w<immAllOnesV>;
2153 // With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
2154 let Predicates = [HasAVX512] in {
2155 def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
2156 def : Pat<(v8i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK8)>;
2157 def : Pat<(v4i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK4)>;
2158 def : Pat<(v2i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK2)>;
2159 def : Pat<(i1 0), (COPY_TO_REGCLASS (KSET0W), VK1)>;
2160 def : Pat<(i1 1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
2161 def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
// Mask subvector insert/extract. Index-0 cases are register-class changes;
// extracting the high half of a v16i1 needs a KSHIFTRW by 8 first.
2163 def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
2164 (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;
2166 def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
2167 (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>;
2169 def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
2170 (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
2172 let Predicates = [HasVLX] in {
2173 def : Pat<(v8i1 (insert_subvector undef, (v4i1 VK4:$src), (iPTR 0))),
2174 (v8i1 (COPY_TO_REGCLASS VK4:$src, VK8))>;
2175 def : Pat<(v8i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))),
2176 (v8i1 (COPY_TO_REGCLASS VK2:$src, VK8))>;
2177 def : Pat<(v4i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))),
2178 (v4i1 (COPY_TO_REGCLASS VK2:$src, VK4))>;
2179 def : Pat<(v4i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
2180 (v4i1 (COPY_TO_REGCLASS VK8:$src, VK4))>;
2181 def : Pat<(v2i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
2182 (v2i1 (COPY_TO_REGCLASS VK8:$src, VK2))>;
// Narrow-mask shifts: widen to VK16, use KSHIFT*W, narrow back. The v8i1
// versions are only needed when there is no native KSHIFT*B (NoDQI).
2185 def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
2186 (v8i1 (COPY_TO_REGCLASS
2187 (KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16),
2188 (I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;
2190 def : Pat<(v8i1 (X86vsrli VK8:$src, (i8 imm:$imm))),
2191 (v8i1 (COPY_TO_REGCLASS
2192 (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16),
2193 (I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;
2195 def : Pat<(v4i1 (X86vshli VK4:$src, (i8 imm:$imm))),
2196 (v4i1 (COPY_TO_REGCLASS
2197 (KSHIFTLWri (COPY_TO_REGCLASS VK4:$src, VK16),
2198 (I8Imm $imm)), VK4))>, Requires<[HasAVX512]>;
2200 def : Pat<(v4i1 (X86vsrli VK4:$src, (i8 imm:$imm))),
2201 (v4i1 (COPY_TO_REGCLASS
2202 (KSHIFTRWri (COPY_TO_REGCLASS VK4:$src, VK16),
2203 (I8Imm $imm)), VK4))>, Requires<[HasAVX512]>;
2205 //===----------------------------------------------------------------------===//
2206 // AVX-512 - Aligned and unaligned load and store
// One load multiclass covering: plain load (rm), merge-masked (rmk),
// zero-masked (rmkz), and the reg-reg move forms (rr/rrk/rrkz). The trailing
// Pats map masked_load DAG nodes onto the rmk/rmkz instructions.
2210 multiclass avx512_load<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
2211 PatFrag ld_frag, PatFrag mload,
2212 bit IsReMaterializable = 1> {
2213 let hasSideEffects = 0 in {
2214 def rr : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst), (ins _.RC:$src),
2215 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [],
2217 def rrkz : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst),
2218 (ins _.KRCWM:$mask, _.RC:$src),
2219 !strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|",
2220 "${dst} {${mask}} {z}, $src}"), [], _.ExeDomain>,
2223 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable,
2224 SchedRW = [WriteLoad] in
2225 def rm : AVX512PI<opc, MRMSrcMem, (outs _.RC:$dst), (ins _.MemOp:$src),
2226 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2227 [(set _.RC:$dst, (_.VT (bitconvert (ld_frag addr:$src))))],
// Merge-masked forms tie $src0 to $dst so unmasked elements pass through.
2230 let Constraints = "$src0 = $dst" in {
2231 def rrk : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst),
2232 (ins _.RC:$src0, _.KRCWM:$mask, _.RC:$src1),
2233 !strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
2234 "${dst} {${mask}}, $src1}"),
2235 [(set _.RC:$dst, (_.VT (vselect _.KRCWM:$mask,
2237 (_.VT _.RC:$src0))))], _.ExeDomain>,
2239 let mayLoad = 1, SchedRW = [WriteLoad] in
2240 def rmk : AVX512PI<opc, MRMSrcMem, (outs _.RC:$dst),
2241 (ins _.RC:$src0, _.KRCWM:$mask, _.MemOp:$src1),
2242 !strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
2243 "${dst} {${mask}}, $src1}"),
2244 [(set _.RC:$dst, (_.VT
2245 (vselect _.KRCWM:$mask,
2246 (_.VT (bitconvert (ld_frag addr:$src1))),
2247 (_.VT _.RC:$src0))))], _.ExeDomain>, EVEX, EVEX_K;
2249 let mayLoad = 1, SchedRW = [WriteLoad] in
2250 def rmkz : AVX512PI<opc, MRMSrcMem, (outs _.RC:$dst),
2251 (ins _.KRCWM:$mask, _.MemOp:$src),
2252 OpcodeStr #"\t{$src, ${dst} {${mask}} {z}|"#
2253 "${dst} {${mask}} {z}, $src}",
2254 [(set _.RC:$dst, (_.VT (vselect _.KRCWM:$mask,
2255 (_.VT (bitconvert (ld_frag addr:$src))), _.ImmAllZerosV)))],
2256 _.ExeDomain>, EVEX, EVEX_KZ;
// undef passthru is treated the same as a zero passthru (rmkz).
2258 def : Pat<(_.VT (mload addr:$ptr, _.KRCWM:$mask, undef)),
2259 (!cast<Instruction>(NAME#_.ZSuffix##rmkz) _.KRCWM:$mask, addr:$ptr)>;
2261 def : Pat<(_.VT (mload addr:$ptr, _.KRCWM:$mask, _.ImmAllZerosV)),
2262 (!cast<Instruction>(NAME#_.ZSuffix##rmkz) _.KRCWM:$mask, addr:$ptr)>;
2264 def : Pat<(_.VT (mload addr:$ptr, _.KRCWM:$mask, (_.VT _.RC:$src0))),
2265 (!cast<Instruction>(NAME#_.ZSuffix##rmk) _.RC:$src0,
2266 _.KRCWM:$mask, addr:$ptr)>;
// Width-dispatch wrappers: 512-bit under [prd], 256/128-bit additionally
// under HasVLX. Aligned variants use the AlignedLdFrag / aligned masked
// PatFrags; unaligned variants use plain LdFrag / masked_load_unaligned.
2269 multiclass avx512_alignedload_vl<bits<8> opc, string OpcodeStr,
2270 AVX512VLVectorVTInfo _,
2272 bit IsReMaterializable = 1> {
2273 let Predicates = [prd] in
2274 defm Z : avx512_load<opc, OpcodeStr, _.info512, _.info512.AlignedLdFrag,
2275 masked_load_aligned512, IsReMaterializable>, EVEX_V512;
2277 let Predicates = [prd, HasVLX] in {
2278 defm Z256 : avx512_load<opc, OpcodeStr, _.info256, _.info256.AlignedLdFrag,
2279 masked_load_aligned256, IsReMaterializable>, EVEX_V256;
2280 defm Z128 : avx512_load<opc, OpcodeStr, _.info128, _.info128.AlignedLdFrag,
2281 masked_load_aligned128, IsReMaterializable>, EVEX_V128;
2285 multiclass avx512_load_vl<bits<8> opc, string OpcodeStr,
2286 AVX512VLVectorVTInfo _,
2288 bit IsReMaterializable = 1> {
2289 let Predicates = [prd] in
2290 defm Z : avx512_load<opc, OpcodeStr, _.info512, _.info512.LdFrag,
2291 masked_load_unaligned, IsReMaterializable>, EVEX_V512;
2293 let Predicates = [prd, HasVLX] in {
2294 defm Z256 : avx512_load<opc, OpcodeStr, _.info256, _.info256.LdFrag,
2295 masked_load_unaligned, IsReMaterializable>, EVEX_V256;
2296 defm Z128 : avx512_load<opc, OpcodeStr, _.info128, _.info128.LdFrag,
2297 masked_load_unaligned, IsReMaterializable>, EVEX_V128;
// Store side: mr (plain store), mrk (masked store), plus disassembly-only
// reversed reg-reg forms (_alt) that are never selected (isCodeGenOnly).
2301 multiclass avx512_store<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
2302 PatFrag st_frag, PatFrag mstore> {
2303 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
2304 def rr_alt : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst), (ins _.RC:$src),
2305 OpcodeStr # "\t{$src, $dst|$dst, $src}", [],
2307 let Constraints = "$src1 = $dst" in
2308 def rrk_alt : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst),
2309 (ins _.RC:$src1, _.KRCWM:$mask, _.RC:$src2),
2311 "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}",
2312 [], _.ExeDomain>, EVEX, EVEX_K;
2313 def rrkz_alt : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst),
2314 (ins _.KRCWM:$mask, _.RC:$src),
2316 "\t{$src, ${dst} {${mask}} {z}|" #
2317 "${dst} {${mask}} {z}, $src}",
2318 [], _.ExeDomain>, EVEX, EVEX_KZ;
2320 let mayStore = 1 in {
2321 def mr : AVX512PI<opc, MRMDestMem, (outs), (ins _.MemOp:$dst, _.RC:$src),
2322 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2323 [(st_frag (_.VT _.RC:$src), addr:$dst)], _.ExeDomain>, EVEX;
2324 def mrk : AVX512PI<opc, MRMDestMem, (outs),
2325 (ins _.MemOp:$dst, _.KRCWM:$mask, _.RC:$src),
2326 OpcodeStr # "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}",
2327 [], _.ExeDomain>, EVEX, EVEX_K;
2330 def: Pat<(mstore addr:$ptr, _.KRCWM:$mask, (_.VT _.RC:$src)),
2331 (!cast<Instruction>(NAME#_.ZSuffix##mrk) addr:$ptr,
2332 _.KRCWM:$mask, _.RC:$src)>;
2336 multiclass avx512_store_vl< bits<8> opc, string OpcodeStr,
2337 AVX512VLVectorVTInfo _, Predicate prd> {
2338 let Predicates = [prd] in
2339 defm Z : avx512_store<opc, OpcodeStr, _.info512, store,
2340 masked_store_unaligned>, EVEX_V512;
2342 let Predicates = [prd, HasVLX] in {
2343 defm Z256 : avx512_store<opc, OpcodeStr, _.info256, store,
2344 masked_store_unaligned>, EVEX_V256;
2345 defm Z128 : avx512_store<opc, OpcodeStr, _.info128, store,
2346 masked_store_unaligned>, EVEX_V128;
2350 multiclass avx512_alignedstore_vl<bits<8> opc, string OpcodeStr,
2351 AVX512VLVectorVTInfo _, Predicate prd> {
2352 let Predicates = [prd] in
2353 defm Z : avx512_store<opc, OpcodeStr, _.info512, alignedstore512,
2354 masked_store_aligned512>, EVEX_V512;
2356 let Predicates = [prd, HasVLX] in {
2357 defm Z256 : avx512_store<opc, OpcodeStr, _.info256, alignedstore256,
2358 masked_store_aligned256>, EVEX_V256;
2359 defm Z128 : avx512_store<opc, OpcodeStr, _.info128, alignedstore,
2360 masked_store_aligned128>, EVEX_V128;
2364 defm VMOVAPS : avx512_alignedload_vl<0x28, "vmovaps", avx512vl_f32_info,
2366 avx512_alignedstore_vl<0x29, "vmovaps", avx512vl_f32_info,
2367 HasAVX512>, PS, EVEX_CD8<32, CD8VF>;
2369 defm VMOVAPD : avx512_alignedload_vl<0x28, "vmovapd", avx512vl_f64_info,
2371 avx512_alignedstore_vl<0x29, "vmovapd", avx512vl_f64_info,
2372 HasAVX512>, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2374 defm VMOVUPS : avx512_load_vl<0x10, "vmovups", avx512vl_f32_info, HasAVX512>,
2375 avx512_store_vl<0x11, "vmovups", avx512vl_f32_info, HasAVX512>,
2376 PS, EVEX_CD8<32, CD8VF>;
2378 defm VMOVUPD : avx512_load_vl<0x10, "vmovupd", avx512vl_f64_info, HasAVX512, 0>,
2379 avx512_store_vl<0x11, "vmovupd", avx512vl_f64_info, HasAVX512>,
2380 PD, VEX_W, EVEX_CD8<64, CD8VF>;
// Lower the 512-bit FP masked load intrinsics with an all-zeros passthru to
// the zero-masking ("...rmkz") load forms.  The GR8/GR16 mask operand is
// moved into the matching VK8WM/VK16WM write-mask class first.
2382 def: Pat<(v8f64 (int_x86_avx512_mask_loadu_pd_512 addr:$ptr,
2383 (bc_v8f64 (v16i32 immAllZerosV)), GR8:$mask)),
2384 (VMOVUPDZrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
2386 def: Pat<(v16f32 (int_x86_avx512_mask_loadu_ps_512 addr:$ptr,
2387 (bc_v16f32 (v16i32 immAllZerosV)), GR16:$mask)),
2388 (VMOVUPSZrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
2390 def: Pat<(v8f64 (int_x86_avx512_mask_load_pd_512 addr:$ptr,
2391 (bc_v8f64 (v16i32 immAllZerosV)), GR8:$mask)),
2392 (VMOVAPDZrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
2394 def: Pat<(v16f32 (int_x86_avx512_mask_load_ps_512 addr:$ptr,
2395 (bc_v16f32 (v16i32 immAllZerosV)), GR16:$mask)),
2396 (VMOVAPSZrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
// An all-ones immediate mask means "no masking": use the plain aligned load.
2398 def: Pat<(v8f64 (int_x86_avx512_mask_load_pd_512 addr:$ptr,
2399 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
2400 (VMOVAPDZrm addr:$ptr)>;
2402 def: Pat<(v16f32 (int_x86_avx512_mask_load_ps_512 addr:$ptr,
2403 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
2404 (VMOVAPSZrm addr:$ptr)>;
// Masked-store intrinsics map onto the masked ("...mrk") store forms.
2406 def: Pat<(int_x86_avx512_mask_storeu_ps_512 addr:$ptr, (v16f32 VR512:$src),
2408 (VMOVUPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
2410 def: Pat<(int_x86_avx512_mask_storeu_pd_512 addr:$ptr, (v8f64 VR512:$src),
2412 (VMOVUPDZmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
2415 def: Pat<(int_x86_avx512_mask_store_ps_512 addr:$ptr, (v16f32 VR512:$src),
2417 (VMOVAPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
2419 def: Pat<(int_x86_avx512_mask_store_pd_512 addr:$ptr, (v8f64 VR512:$src),
2421 (VMOVAPDZmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
// Without VLX there are no 256-bit masked moves, so widen v8f32 masked
// load/store to the 512-bit instruction: the v8i1 mask is copied into VK16WM,
// the YMM value is inserted into/extracted from a ZMM via sub_ymm.
2424 let Predicates = [HasAVX512, NoVLX] in {
2425 def: Pat<(masked_store addr:$ptr, VK8WM:$mask, (v8f32 VR256:$src)),
2426 (VMOVUPSZmrk addr:$ptr,
2427 (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)),
2428 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256:$src, sub_ymm))>;
// Zero-masking load when the passthru is undef.
2430 def: Pat<(v8f32 (masked_load addr:$ptr, VK8WM:$mask, undef)),
2431 (v8f32 (EXTRACT_SUBREG (v16f32 (VMOVUPSZrmkz
2432 (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)), addr:$ptr)), sub_ymm))>;
// Merge-masking load: widen the passthru $src0 before the masked load.
2434 def: Pat<(v8f32 (masked_load addr:$ptr, VK8WM:$mask, (v8f32 VR256:$src0))),
2435 (v8f32 (EXTRACT_SUBREG (v16f32 (VMOVUPSZrmk
2436 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256:$src0, sub_ymm),
2437 (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)), addr:$ptr)), sub_ymm))>;
// Integer vector moves.  Aligned forms (VMOVDQA32/64) need only AVX512F;
// the byte/word unaligned forms (VMOVDQU8/16) require BWI.
2440 defm VMOVDQA32 : avx512_alignedload_vl<0x6F, "vmovdqa32", avx512vl_i32_info,
2442 avx512_alignedstore_vl<0x7F, "vmovdqa32", avx512vl_i32_info,
2443 HasAVX512>, PD, EVEX_CD8<32, CD8VF>;
2445 defm VMOVDQA64 : avx512_alignedload_vl<0x6F, "vmovdqa64", avx512vl_i64_info,
2447 avx512_alignedstore_vl<0x7F, "vmovdqa64", avx512vl_i64_info,
2448 HasAVX512>, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2450 defm VMOVDQU8 : avx512_load_vl<0x6F, "vmovdqu8", avx512vl_i8_info, HasBWI>,
2451 avx512_store_vl<0x7F, "vmovdqu8", avx512vl_i8_info,
2452 HasBWI>, XD, EVEX_CD8<8, CD8VF>;
2454 defm VMOVDQU16 : avx512_load_vl<0x6F, "vmovdqu16", avx512vl_i16_info, HasBWI>,
2455 avx512_store_vl<0x7F, "vmovdqu16", avx512vl_i16_info,
2456 HasBWI>, XD, VEX_W, EVEX_CD8<16, CD8VF>;
2458 defm VMOVDQU32 : avx512_load_vl<0x6F, "vmovdqu32", avx512vl_i32_info, HasAVX512>,
2459 avx512_store_vl<0x7F, "vmovdqu32", avx512vl_i32_info,
2460 HasAVX512>, XS, EVEX_CD8<32, CD8VF>;
2462 defm VMOVDQU64 : avx512_load_vl<0x6F, "vmovdqu64", avx512vl_i64_info, HasAVX512>,
2463 avx512_store_vl<0x7F, "vmovdqu64", avx512vl_i64_info,
2464 HasAVX512>, XS, VEX_W, EVEX_CD8<64, CD8VF>;
// Integer counterparts of the FP intrinsic patterns above: zero-passthru
// masked loads select the zero-masking VMOVDQU32/64 forms.
2466 def: Pat<(v16i32 (int_x86_avx512_mask_loadu_d_512 addr:$ptr,
2467 (v16i32 immAllZerosV), GR16:$mask)),
2468 (VMOVDQU32Zrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
2470 def: Pat<(v8i64 (int_x86_avx512_mask_loadu_q_512 addr:$ptr,
2471 (bc_v8i64 (v16i32 immAllZerosV)), GR8:$mask)),
2472 (VMOVDQU64Zrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
2474 def: Pat<(int_x86_avx512_mask_storeu_d_512 addr:$ptr, (v16i32 VR512:$src),
2476 (VMOVDQU32Zmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
2478 def: Pat<(int_x86_avx512_mask_storeu_q_512 addr:$ptr, (v8i64 VR512:$src),
2480 (VMOVDQU64Zmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
// vselect-with-zero is a zero-masking register move.  When the zero vector is
// the true operand, the mask is inverted with KNOT first.
2483 let AddedComplexity = 20 in {
2484 def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src),
2485 (bc_v8i64 (v16i32 immAllZerosV)))),
2486 (VMOVDQU64Zrrkz VK8WM:$mask, VR512:$src)>;
2488 def : Pat<(v8i64 (vselect VK8WM:$mask, (bc_v8i64 (v16i32 immAllZerosV)),
2489 (v8i64 VR512:$src))),
2490 (VMOVDQU64Zrrkz (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$mask, VK16)),
2493 def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 VR512:$src),
2494 (v16i32 immAllZerosV))),
2495 (VMOVDQU32Zrrkz VK16WM:$mask, VR512:$src)>;
2497 def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 immAllZerosV),
2498 (v16i32 VR512:$src))),
2499 (VMOVDQU32Zrrkz (KNOTWrr VK16WM:$mask), VR512:$src)>;
// No VLX: widen v8i32 masked load/store to the 512-bit instruction, as done
// for v8f32 above.
2502 let Predicates = [HasAVX512, NoVLX] in {
2503 def: Pat<(masked_store addr:$ptr, VK8WM:$mask, (v8i32 VR256:$src)),
2504 (VMOVDQU32Zmrk addr:$ptr,
2505 (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)),
2506 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256:$src, sub_ymm))>;
2508 def: Pat<(v8i32 (masked_load addr:$ptr, VK8WM:$mask, undef)),
2509 (v8i32 (EXTRACT_SUBREG (v16i32 (VMOVDQU32Zrmkz
2510 (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)), addr:$ptr)), sub_ymm))>;
2513 // Move Int Doubleword to Packed Double Int
// GR32/GR64 -> XMM moves (vmovd/vmovq) plus the isCodeGenOnly GR64<->FR64
// bitcast forms used by the compiler but hidden from the assembler.
2515 def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
2516 "vmovd\t{$src, $dst|$dst, $src}",
2518 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
2520 def VMOVDI2PDIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src),
2521 "vmovd\t{$src, $dst|$dst, $src}",
2523 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
2524 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2525 def VMOV64toPQIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$src),
2526 "vmovq\t{$src, $dst|$dst, $src}",
2528 (v2i64 (scalar_to_vector GR64:$src)))],
2529 IIC_SSE_MOVDQ>, EVEX, VEX_W, VEX_LIG;
2530 let isCodeGenOnly = 1 in {
2531 def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
2532 "vmovq\t{$src, $dst|$dst, $src}",
2533 [(set FR64:$dst, (bitconvert GR64:$src))],
2534 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
2535 def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
2536 "vmovq\t{$src, $dst|$dst, $src}",
2537 [(set GR64:$dst, (bitconvert FR64:$src))],
2538 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
2540 def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
2541 "vmovq\t{$src, $dst|$dst, $src}",
2542 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
2543 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteStore]>,
2544 EVEX_CD8<64, CD8VT1>;
2546 // Move Int Doubleword to Single Scalar
// GR32 -> FR32X bitcast moves (codegen-only) and the reverse XMM -> r/m32
// extraction of element 0 with vmovd.
2548 let isCodeGenOnly = 1 in {
2549 def VMOVDI2SSZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src),
2550 "vmovd\t{$src, $dst|$dst, $src}",
2551 [(set FR32X:$dst, (bitconvert GR32:$src))],
2552 IIC_SSE_MOVDQ>, EVEX, VEX_LIG;
2554 def VMOVDI2SSZrm : AVX512BI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src),
2555 "vmovd\t{$src, $dst|$dst, $src}",
2556 [(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))],
2557 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2560 // Move doubleword from xmm register to r/m32
2562 def VMOVPDI2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128X:$src),
2563 "vmovd\t{$src, $dst|$dst, $src}",
2564 [(set GR32:$dst, (vector_extract (v4i32 VR128X:$src),
2565 (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
2567 def VMOVPDI2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
2568 (ins i32mem:$dst, VR128X:$src),
2569 "vmovd\t{$src, $dst|$dst, $src}",
2570 [(store (i32 (vector_extract (v4i32 VR128X:$src),
2571 (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
2572 EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2574 // Move quadword from xmm1 register to r/m64
// XMM element 0 -> GR64/m64 (64-bit mode only), FR32X -> r/m32 bitcasts
// (codegen-only), and the m64 -> XMM quadword load.
2576 def VMOVPQIto64Zrr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128X:$src),
2577 "vmovq\t{$src, $dst|$dst, $src}",
2578 [(set GR64:$dst, (extractelt (v2i64 VR128X:$src),
2580 IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_LIG, VEX_W,
2581 Requires<[HasAVX512, In64BitMode]>;
2583 def VMOVPQIto64Zmr : I<0xD6, MRMDestMem, (outs),
2584 (ins i64mem:$dst, VR128X:$src),
2585 "vmovq\t{$src, $dst|$dst, $src}",
2586 [(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
2587 addr:$dst)], IIC_SSE_MOVDQ>,
2588 EVEX, PD, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>,
2589 Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>;
2591 // Move Scalar Single to Double Int
2593 let isCodeGenOnly = 1 in {
2594 def VMOVSS2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst),
2596 "vmovd\t{$src, $dst|$dst, $src}",
2597 [(set GR32:$dst, (bitconvert FR32X:$src))],
2598 IIC_SSE_MOVD_ToGP>, EVEX, VEX_LIG;
2599 def VMOVSS2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
2600 (ins i32mem:$dst, FR32X:$src),
2601 "vmovd\t{$src, $dst|$dst, $src}",
2602 [(store (i32 (bitconvert FR32X:$src)), addr:$dst)],
2603 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2606 // Move Quadword Int to Packed Quadword Int
2608 def VMOVQI2PQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
2610 "vmovq\t{$src, $dst|$dst, $src}",
2612 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
2613 EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
2615 //===----------------------------------------------------------------------===//
2616 // AVX-512 MOVSS, MOVSD
2617 //===----------------------------------------------------------------------===//
// Template for VMOVSS/VMOVSD: register merge (rr), masked register merge
// (rrk, tied to $dst via Constraints), scalar load (rm), scalar store (mr)
// and masked scalar store (mrk).  rrk/mrk carry no patterns and exist for
// the masked assembly forms; hasSideEffects = 0 keeps them schedulable.
2622 let hasSideEffects = 0 in {
2623 def rr : SI<0x10, MRMSrcReg, (outs VR128X:$dst), (ins VR128X:$src1, RC:$src2),
2624 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2625 [(set VR128X:$dst, (vt (OpNode VR128X:$src1,
2626 (scalar_to_vector RC:$src2))))],
2627 IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG;
2628 let Constraints = "$src1 = $dst" in
2629 def rrk : SI<0x10, MRMSrcReg, (outs VR128X:$dst),
2630 (ins VR128X:$src1, VK1WM:$mask, RC:$src2, RC:$src3),
2632 "\t{$src3, $src2, $dst {${mask}}|$dst {${mask}}, $src2, $src3}"),
2633 [], IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG, EVEX_K;
2634 def rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
2635 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
2636 [(set RC:$dst, (mem_pat addr:$src))], IIC_SSE_MOV_S_RM>,
2638 let mayStore = 1 in {
2639 def mr: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
2640 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
2641 [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
2643 def mrk: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, VK1WM:$mask, RC:$src),
2644 !strconcat(asm, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
2645 [], IIC_SSE_MOV_S_MR>,
2646 EVEX, VEX_LIG, EVEX_K;
2648 } //hasSideEffects = 0
// Instantiate the scalar-move template for single and double precision.
2651 let ExeDomain = SSEPackedSingle in
2652 defm VMOVSSZ : avx512_move_scalar<"movss", FR32X, X86Movss, v4f32, f32mem,
2653 loadf32>, XS, EVEX_CD8<32, CD8VT1>;
2655 let ExeDomain = SSEPackedDouble in
2656 defm VMOVSDZ : avx512_move_scalar<"movsd", FR64X, X86Movsd, v2f64, f64mem,
2657 loadf64>, XD, VEX_W, EVEX_CD8<64, CD8VT1>;
// Scalar select on a 1-bit mask becomes a masked merge move; the FR32X/FR64X
// operands are shuttled through VR128X to match the instruction's classes.
2659 def : Pat<(f32 (X86select VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
2660 (COPY_TO_REGCLASS (VMOVSSZrrk (COPY_TO_REGCLASS FR32X:$src2, VR128X),
2661 VK1WM:$mask, (f32 (IMPLICIT_DEF)), FR32X:$src1), FR32X)>;
2663 def : Pat<(f64 (X86select VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
2664 (COPY_TO_REGCLASS (VMOVSDZrrk (COPY_TO_REGCLASS FR64X:$src2, VR128X),
2665 VK1WM:$mask, (f64 (IMPLICIT_DEF)), FR64X:$src1), FR64X)>;
2667 def : Pat<(int_x86_avx512_mask_store_ss addr:$dst, VR128X:$src, GR8:$mask),
2668 (VMOVSSZmrk addr:$dst, (i1 (COPY_TO_REGCLASS GR8:$mask, VK1WM)),
2669 (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
2671 // For the disassembler
// Store-form (0x11) register-register encodings; no patterns, disassembly only.
2672 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
2673 def VMOVSSZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
2674 (ins VR128X:$src1, FR32X:$src2),
2675 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
2677 XS, EVEX_4V, VEX_LIG;
2678 def VMOVSDZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
2679 (ins VR128X:$src1, FR64X:$src2),
2680 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
2682 XD, EVEX_4V, VEX_LIG, VEX_W;
// Zero-extending scalar moves: build the zero vector with V_SET0 and merge
// the low element with VMOVSS/VMOVSD.
2685 let Predicates = [HasAVX512] in {
2686 let AddedComplexity = 15 in {
2687 // Move scalar to XMM zero-extended, zeroing a VR128X then do a
2688 // MOVS{S,D} to the lower bits.
2689 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32X:$src)))),
2690 (VMOVSSZrr (v4f32 (V_SET0)), FR32X:$src)>;
2691 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128X:$src))),
2692 (VMOVSSZrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
2693 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128X:$src))),
2694 (VMOVSSZrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
2695 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64X:$src)))),
2696 (VMOVSDZrr (v2f64 (V_SET0)), FR64X:$src)>;
2698 // Move low f32 and clear high bits.
2699 def : Pat<(v8f32 (X86vzmovl (v8f32 VR256X:$src))),
2700 (SUBREG_TO_REG (i32 0),
2701 (VMOVSSZrr (v4f32 (V_SET0)),
2702 (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)), sub_xmm)>;
2703 def : Pat<(v8i32 (X86vzmovl (v8i32 VR256X:$src))),
2704 (SUBREG_TO_REG (i32 0),
2705 (VMOVSSZrr (v4i32 (V_SET0)),
2706 (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)), sub_xmm)>;
2709 let AddedComplexity = 20 in {
2710 // MOVSSrm zeros the high parts of the register; represent this
2711 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
2712 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
2713 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
2714 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
2715 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
2716 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
2717 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
2719 // MOVSDrm zeros the high parts of the register; represent this
2720 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
2721 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
2722 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
2723 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
2724 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
2725 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
2726 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
2727 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
2728 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
2729 def : Pat<(v2f64 (X86vzload addr:$src)),
2730 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
2732 // Represent the same patterns above but in the form they appear for
// 256-bit types: an insert_subvector of the 128-bit value into undef.
2734 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
2735 (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
2736 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
2737 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
2738 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
2739 (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
2740 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
2741 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
2742 (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
2744 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
2745 (v4f32 (scalar_to_vector FR32X:$src)), (iPTR 0)))),
2746 (SUBREG_TO_REG (i32 0), (v4f32 (VMOVSSZrr (v4f32 (V_SET0)),
2747 FR32X:$src)), sub_xmm)>;
2748 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
2749 (v2f64 (scalar_to_vector FR64X:$src)), (iPTR 0)))),
2750 (SUBREG_TO_REG (i64 0), (v2f64 (VMOVSDZrr (v2f64 (V_SET0)),
2751 FR64X:$src)), sub_xmm)>;
2752 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
2753 (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
2754 (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;
2756 // Move low f64 and clear high bits.
2757 def : Pat<(v4f64 (X86vzmovl (v4f64 VR256X:$src))),
2758 (SUBREG_TO_REG (i32 0),
2759 (VMOVSDZrr (v2f64 (V_SET0)),
2760 (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)), sub_xmm)>;
2762 def : Pat<(v4i64 (X86vzmovl (v4i64 VR256X:$src))),
2763 (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (V_SET0)),
2764 (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)), sub_xmm)>;
2766 // Extract and store.
2767 def : Pat<(store (f32 (vector_extract (v4f32 VR128X:$src), (iPTR 0))),
2769 (VMOVSSZmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128X:$src), FR32X))>;
2770 def : Pat<(store (f64 (vector_extract (v2f64 VR128X:$src), (iPTR 0))),
2772 (VMOVSDZmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128X:$src), FR64X))>;
2774 // Shuffle with VMOVSS
// X86Movss/X86Movsd shuffle nodes select the corresponding merge moves;
// 256-bit forms operate on the low XMM lane via sub_xmm.
2775 def : Pat<(v4i32 (X86Movss VR128X:$src1, VR128X:$src2)),
2776 (VMOVSSZrr (v4i32 VR128X:$src1),
2777 (COPY_TO_REGCLASS (v4i32 VR128X:$src2), FR32X))>;
2778 def : Pat<(v4f32 (X86Movss VR128X:$src1, VR128X:$src2)),
2779 (VMOVSSZrr (v4f32 VR128X:$src1),
2780 (COPY_TO_REGCLASS (v4f32 VR128X:$src2), FR32X))>;
2783 def : Pat<(v8i32 (X86Movss VR256X:$src1, VR256X:$src2)),
2784 (SUBREG_TO_REG (i32 0),
2785 (VMOVSSZrr (EXTRACT_SUBREG (v8i32 VR256X:$src1), sub_xmm),
2786 (EXTRACT_SUBREG (v8i32 VR256X:$src2), sub_xmm)),
2788 def : Pat<(v8f32 (X86Movss VR256X:$src1, VR256X:$src2)),
2789 (SUBREG_TO_REG (i32 0),
2790 (VMOVSSZrr (EXTRACT_SUBREG (v8f32 VR256X:$src1), sub_xmm),
2791 (EXTRACT_SUBREG (v8f32 VR256X:$src2), sub_xmm)),
2794 // Shuffle with VMOVSD
2795 def : Pat<(v2i64 (X86Movsd VR128X:$src1, VR128X:$src2)),
2796 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2797 def : Pat<(v2f64 (X86Movsd VR128X:$src1, VR128X:$src2)),
2798 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2799 def : Pat<(v4f32 (X86Movsd VR128X:$src1, VR128X:$src2)),
2800 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2801 def : Pat<(v4i32 (X86Movsd VR128X:$src1, VR128X:$src2)),
2802 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2805 def : Pat<(v4i64 (X86Movsd VR256X:$src1, VR256X:$src2)),
2806 (SUBREG_TO_REG (i32 0),
2807 (VMOVSDZrr (EXTRACT_SUBREG (v4i64 VR256X:$src1), sub_xmm),
2808 (EXTRACT_SUBREG (v4i64 VR256X:$src2), sub_xmm)),
2810 def : Pat<(v4f64 (X86Movsd VR256X:$src1, VR256X:$src2)),
2811 (SUBREG_TO_REG (i32 0),
2812 (VMOVSDZrr (EXTRACT_SUBREG (v4f64 VR256X:$src1), sub_xmm),
2813 (EXTRACT_SUBREG (v4f64 VR256X:$src2), sub_xmm)),
// X86Movlpd/X86Movlps (move low half) also lower to VMOVSD merges.
2816 def : Pat<(v2f64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
2817 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2818 def : Pat<(v2i64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
2819 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2820 def : Pat<(v4f32 (X86Movlps VR128X:$src1, VR128X:$src2)),
2821 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2822 def : Pat<(v4i32 (X86Movlps VR128X:$src1, VR128X:$src2)),
2823 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
// VMOVQ forms that move the low quadword and zero the upper half (X86vzmovl).
2826 let AddedComplexity = 15 in
2827 def VMOVZPQILo2PQIZrr : AVX512XSI<0x7E, MRMSrcReg, (outs VR128X:$dst),
2829 "vmovq\t{$src, $dst|$dst, $src}",
2830 [(set VR128X:$dst, (v2i64 (X86vzmovl
2831 (v2i64 VR128X:$src))))],
2832 IIC_SSE_MOVQ_RR>, EVEX, VEX_W;
2834 let AddedComplexity = 20 in
2835 def VMOVZPQILo2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
2837 "vmovq\t{$src, $dst|$dst, $src}",
2838 [(set VR128X:$dst, (v2i64 (X86vzmovl
2839 (loadv2i64 addr:$src))))],
2840 IIC_SSE_MOVDQ>, EVEX, VEX_W,
// NOTE(review): EVEX_CD8<8, CD8VT8> gives an 8-byte disp8 scale, numerically
// the same as the more natural EVEX_CD8<64, CD8VT1> — confirm this is intended.
2841 EVEX_CD8<8, CD8VT8>;
2843 let Predicates = [HasAVX512] in {
2844 // AVX 128-bit movd/movq instruction write zeros in the high 128-bit part.
2845 let AddedComplexity = 20 in {
2846 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
2847 (VMOVDI2PDIZrm addr:$src)>;
2848 def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
2849 (VMOV64toPQIZrr GR64:$src)>;
2850 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
2851 (VMOVDI2PDIZrr GR32:$src)>;
2853 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
2854 (VMOVDI2PDIZrm addr:$src)>;
2855 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
2856 (VMOVDI2PDIZrm addr:$src)>;
2857 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
2858 (VMOVZPQILo2PQIZrm addr:$src)>;
2859 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128X:$src))),
2860 (VMOVZPQILo2PQIZrr VR128X:$src)>;
2861 def : Pat<(v2i64 (X86vzload addr:$src)),
2862 (VMOVZPQILo2PQIZrm addr:$src)>;
2865 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
2866 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
2867 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
2868 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>;
2869 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
2870 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
2871 (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
// Inserting a GPR into element 0 of a zero (or undef) 512-bit vector is just
// the 128-bit movd/movq widened with SUBREG_TO_REG.
2874 def : Pat<(v16i32 (X86Vinsert (v16i32 immAllZerosV), GR32:$src2, (iPTR 0))),
2875 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;
2877 def : Pat<(v8i64 (X86Vinsert (bc_v8i64 (v16i32 immAllZerosV)), GR64:$src2, (iPTR 0))),
2878 (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
2880 def : Pat<(v16i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
2881 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;
2883 def : Pat<(v8i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
2884 (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
2886 //===----------------------------------------------------------------------===//
2887 // AVX-512 - Non-temporals
2888 //===----------------------------------------------------------------------===//
// Non-temporal loads.  Only the 512-bit form carries a pattern (via the
// movntdqa intrinsic); the VLX 256/128-bit forms are assembly-only here.
2889 let SchedRW = [WriteLoad] in {
2890 def VMOVNTDQAZrm : AVX512PI<0x2A, MRMSrcMem, (outs VR512:$dst),
2891 (ins i512mem:$src), "vmovntdqa\t{$src, $dst|$dst, $src}",
2892 [(set VR512:$dst, (int_x86_avx512_movntdqa addr:$src))],
2893 SSEPackedInt>, EVEX, T8PD, EVEX_V512,
2894 EVEX_CD8<64, CD8VF>;
2896 let Predicates = [HasAVX512, HasVLX] in {
2897 def VMOVNTDQAZ256rm : AVX512PI<0x2A, MRMSrcMem, (outs VR256X:$dst),
2899 "vmovntdqa\t{$src, $dst|$dst, $src}", [],
2900 SSEPackedInt>, EVEX, T8PD, EVEX_V256,
2901 EVEX_CD8<64, CD8VF>;
2903 def VMOVNTDQAZ128rm : AVX512PI<0x2A, MRMSrcMem, (outs VR128X:$dst),
2905 "vmovntdqa\t{$src, $dst|$dst, $src}", [],
2906 SSEPackedInt>, EVEX, T8PD, EVEX_V128,
2907 EVEX_CD8<64, CD8VF>;
// One non-temporal store instruction (memory destination only, mayStore;
// AddedComplexity favors it over ordinary stores when st_frag matches).
2911 multiclass avx512_movnt<bits<8> opc, string OpcodeStr, PatFrag st_frag,
2912 ValueType OpVT, RegisterClass RC, X86MemOperand memop,
2913 Domain d, InstrItinClass itin = IIC_SSE_MOVNT> {
2914 let SchedRW = [WriteStore], mayStore = 1,
2915 AddedComplexity = 400 in
2916 def mr : AVX512PI<opc, MRMDestMem, (outs), (ins memop:$dst, RC:$src),
2917 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2918 [(st_frag (OpVT RC:$src), addr:$dst)], d, itin>, EVEX;
// Instantiate avx512_movnt at 512/256/128 bits; the vector type and memory
// operand names are assembled from the elty/elsz/vsz* string fragments
// (e.g. "v"#"16"#"f"#"32" -> v16f32, "f"#"512mem" -> f512mem).
2921 multiclass avx512_movnt_vl<bits<8> opc, string OpcodeStr, PatFrag st_frag,
2922 string elty, string elsz, string vsz512,
2923 string vsz256, string vsz128, Domain d,
2924 Predicate prd, InstrItinClass itin = IIC_SSE_MOVNT> {
2925 let Predicates = [prd] in
2926 defm Z : avx512_movnt<opc, OpcodeStr, st_frag,
2927 !cast<ValueType>("v"##vsz512##elty##elsz), VR512,
2928 !cast<X86MemOperand>(elty##"512mem"), d, itin>,
2931 let Predicates = [prd, HasVLX] in {
2932 defm Z256 : avx512_movnt<opc, OpcodeStr, st_frag,
2933 !cast<ValueType>("v"##vsz256##elty##elsz), VR256X,
2934 !cast<X86MemOperand>(elty##"256mem"), d, itin>,
2937 defm Z128 : avx512_movnt<opc, OpcodeStr, st_frag,
2938 !cast<ValueType>("v"##vsz128##elty##elsz), VR128X,
2939 !cast<X86MemOperand>(elty##"128mem"), d, itin>,
// Non-temporal store instantiations: integer qwords, packed double, packed
// single (element sizes 64/64/32; 8/4/2 resp. 16/8/4 elements per width).
2944 defm VMOVNTDQ : avx512_movnt_vl<0xE7, "vmovntdq", alignednontemporalstore,
2945 "i", "64", "8", "4", "2", SSEPackedInt,
2946 HasAVX512>, PD, EVEX_CD8<64, CD8VF>;
2948 defm VMOVNTPD : avx512_movnt_vl<0x2B, "vmovntpd", alignednontemporalstore,
2949 "f", "64", "8", "4", "2", SSEPackedDouble,
2950 HasAVX512>, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2952 defm VMOVNTPS : avx512_movnt_vl<0x2B, "vmovntps", alignednontemporalstore,
2953 "f", "32", "16", "8", "4", SSEPackedSingle,
2954 HasAVX512>, PS, EVEX_CD8<32, CD8VF>;
2956 //===----------------------------------------------------------------------===//
2957 // AVX-512 - Integer arithmetic
// Integer binary op, register-register (rr) and register-memory (rm) forms,
// with the full AVX512 masking variants generated by AVX512_maskable.
2959 multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2960 X86VectorVTInfo _, OpndItins itins,
2961 bit IsCommutable = 0> {
2962 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
2963 (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
2964 "$src2, $src1", "$src1, $src2",
2965 (_.VT (OpNode _.RC:$src1, _.RC:$src2)),
2966 "", itins.rr, IsCommutable>,
2967 AVX512BIBase, EVEX_4V;
2970 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
2971 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
2972 "$src2, $src1", "$src1, $src2",
2973 (_.VT (OpNode _.RC:$src1,
2974 (bitconvert (_.LdFrag addr:$src2)))),
2976 AVX512BIBase, EVEX_4V;
// avx512_binop_rm plus the EVEX.b broadcast-from-memory form (rmb), where
// the scalar memory operand is splat via X86VBroadcast before the op.
2979 multiclass avx512_binop_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
2980 X86VectorVTInfo _, OpndItins itins,
2981 bit IsCommutable = 0> :
2982 avx512_binop_rm<opc, OpcodeStr, OpNode, _, itins, IsCommutable> {
2984 defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
2985 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
2986 "${src2}"##_.BroadcastStr##", $src1",
2987 "$src1, ${src2}"##_.BroadcastStr,
2988 (_.VT (OpNode _.RC:$src1,
2990 (_.ScalarLdFrag addr:$src2)))),
2992 AVX512BIBase, EVEX_4V, EVEX_B;
// Instantiate avx512_binop_rm at all three vector lengths; 256/128-bit
// forms additionally require VLX.
2995 multiclass avx512_binop_rm_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
2996 AVX512VLVectorVTInfo VTInfo, OpndItins itins,
2997 Predicate prd, bit IsCommutable = 0> {
2998 let Predicates = [prd] in
2999 defm Z : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info512, itins,
3000 IsCommutable>, EVEX_V512;
3002 let Predicates = [prd, HasVLX] in {
3003 defm Z256 : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info256, itins,
3004 IsCommutable>, EVEX_V256;
3005 defm Z128 : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info128, itins,
3006 IsCommutable>, EVEX_V128;
// Same as avx512_binop_rm_vl but with the broadcast (rmb) form included.
3010 multiclass avx512_binop_rmb_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
3011 AVX512VLVectorVTInfo VTInfo, OpndItins itins,
3012 Predicate prd, bit IsCommutable = 0> {
3013 let Predicates = [prd] in
3014 defm Z : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info512, itins,
3015 IsCommutable>, EVEX_V512;
3017 let Predicates = [prd, HasVLX] in {
3018 defm Z256 : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info256, itins,
3019 IsCommutable>, EVEX_V256;
3020 defm Z128 : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info128, itins,
3021 IsCommutable>, EVEX_V128;
// Per-element-size convenience wrappers.  q/d get the broadcast form
// (element fits EVEX.b embedded broadcast); w/b do not.
3025 multiclass avx512_binop_rm_vl_q<bits<8> opc, string OpcodeStr, SDNode OpNode,
3026 OpndItins itins, Predicate prd,
3027 bit IsCommutable = 0> {
3028 defm NAME : avx512_binop_rmb_vl<opc, OpcodeStr, OpNode, avx512vl_i64_info,
3029 itins, prd, IsCommutable>,
3030 VEX_W, EVEX_CD8<64, CD8VF>;
3033 multiclass avx512_binop_rm_vl_d<bits<8> opc, string OpcodeStr, SDNode OpNode,
3034 OpndItins itins, Predicate prd,
3035 bit IsCommutable = 0> {
3036 defm NAME : avx512_binop_rmb_vl<opc, OpcodeStr, OpNode, avx512vl_i32_info,
3037 itins, prd, IsCommutable>, EVEX_CD8<32, CD8VF>;
3040 multiclass avx512_binop_rm_vl_w<bits<8> opc, string OpcodeStr, SDNode OpNode,
3041 OpndItins itins, Predicate prd,
3042 bit IsCommutable = 0> {
3043 defm NAME : avx512_binop_rm_vl<opc, OpcodeStr, OpNode, avx512vl_i16_info,
3044 itins, prd, IsCommutable>, EVEX_CD8<16, CD8VF>;
3047 multiclass avx512_binop_rm_vl_b<bits<8> opc, string OpcodeStr, SDNode OpNode,
3048 OpndItins itins, Predicate prd,
3049 bit IsCommutable = 0> {
3050 defm NAME : avx512_binop_rm_vl<opc, OpcodeStr, OpNode, avx512vl_i8_info,
3051 itins, prd, IsCommutable>, EVEX_CD8<8, CD8VF>;
// Pairwise and all-sizes wrappers.  _all routes d/q through HasAVX512 and
// b/w through HasBWI, matching the AVX-512 feature split.
3054 multiclass avx512_binop_rm_vl_dq<bits<8> opc_d, bits<8> opc_q, string OpcodeStr,
3055 SDNode OpNode, OpndItins itins, Predicate prd,
3056 bit IsCommutable = 0> {
3057 defm Q : avx512_binop_rm_vl_q<opc_q, OpcodeStr, OpNode, itins, prd,
3060 defm D : avx512_binop_rm_vl_d<opc_d, OpcodeStr, OpNode, itins, prd,
3064 multiclass avx512_binop_rm_vl_bw<bits<8> opc_b, bits<8> opc_w, string OpcodeStr,
3065 SDNode OpNode, OpndItins itins, Predicate prd,
3066 bit IsCommutable = 0> {
3067 defm W : avx512_binop_rm_vl_w<opc_w, OpcodeStr, OpNode, itins, prd,
3070 defm B : avx512_binop_rm_vl_b<opc_b, OpcodeStr, OpNode, itins, prd,
3074 multiclass avx512_binop_rm_vl_all<bits<8> opc_b, bits<8> opc_w,
3075 bits<8> opc_d, bits<8> opc_q,
3076 string OpcodeStr, SDNode OpNode,
3077 OpndItins itins, bit IsCommutable = 0> {
3078 defm NAME : avx512_binop_rm_vl_dq<opc_d, opc_q, OpcodeStr, OpNode,
3079 itins, HasAVX512, IsCommutable>,
3080 avx512_binop_rm_vl_bw<opc_b, opc_w, OpcodeStr, OpNode,
3081 itins, HasBWI, IsCommutable>;
// Binary op whose source and destination vector types differ (e.g. the
// i32 x i32 -> i64 multiplies): _Src describes the inputs, _Dst the result.
// Provides rr, rm, and broadcast rmb forms.
3084 multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, OpndItins itins,
3085 SDNode OpNode,X86VectorVTInfo _Src,
3086 X86VectorVTInfo _Dst, bit IsCommutable = 0> {
3087 defm rr : AVX512_maskable<opc, MRMSrcReg, _Dst, (outs _Dst.RC:$dst),
3088 (ins _Src.RC:$src1, _Src.RC:$src2), OpcodeStr,
3089 "$src2, $src1","$src1, $src2",
3091 (_Src.VT _Src.RC:$src1),
3092 (_Src.VT _Src.RC:$src2))),
3093 "",itins.rr, IsCommutable>,
3094 AVX512BIBase, EVEX_4V;
3095 let mayLoad = 1 in {
3096 defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
3097 (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
3098 "$src2, $src1", "$src1, $src2",
3099 (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
3100 (bitconvert (_Src.LdFrag addr:$src2)))),
3102 AVX512BIBase, EVEX_4V;
// Broadcast uses the *destination* element type for the scalar memory
// operand and splat.
3104 defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
3105 (ins _Src.RC:$src1, _Dst.ScalarMemOp:$src2),
3107 "${src2}"##_Dst.BroadcastStr##", $src1",
3108 "$src1, ${src2}"##_Dst.BroadcastStr,
3109 (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1), (bitconvert
3110 (_Dst.VT (X86VBroadcast
3111 (_Dst.ScalarLdFrag addr:$src2)))))),
3113 AVX512BIBase, EVEX_4V, EVEX_B;
// Integer add/sub (plain, saturating, unsigned-saturating) and multiplies.
// Add forms are commutable (trailing 1); sub forms are not.
3117 defm VPADD : avx512_binop_rm_vl_all<0xFC, 0xFD, 0xFE, 0xD4, "vpadd", add,
3118 SSE_INTALU_ITINS_P, 1>;
3119 defm VPSUB : avx512_binop_rm_vl_all<0xF8, 0xF9, 0xFA, 0xFB, "vpsub", sub,
3120 SSE_INTALU_ITINS_P, 0>;
3121 defm VPADDS : avx512_binop_rm_vl_bw<0xEC, 0xED, "vpadds", X86adds,
3122 SSE_INTALU_ITINS_P, HasBWI, 1>;
3123 defm VPSUBS : avx512_binop_rm_vl_bw<0xE8, 0xE9, "vpsubs", X86subs,
3124 SSE_INTALU_ITINS_P, HasBWI, 0>;
3125 defm VPADDUS : avx512_binop_rm_vl_bw<0xDC, 0xDD, "vpaddus", X86addus,
3126 SSE_INTALU_ITINS_P, HasBWI, 1>;
3127 defm VPSUBUS : avx512_binop_rm_vl_bw<0xD8, 0xD9, "vpsubus", X86subus,
3128 SSE_INTALU_ITINS_P, HasBWI, 0>;
3129 defm VPMULLD : avx512_binop_rm_vl_d<0x40, "vpmull", mul,
3130 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
3131 defm VPMULLW : avx512_binop_rm_vl_w<0xD5, "vpmull", mul,
3132 SSE_INTALU_ITINS_P, HasBWI, 1>;
3133 defm VPMULLQ : avx512_binop_rm_vl_q<0x40, "vpmull", mul,
3134 SSE_INTALU_ITINS_P, HasDQI, 1>;
// Widening i32->i64 binary ops (VPMULDQ/VPMULUDQ) at all vector lengths;
// _Src is the i32 info, _Dst the i64 info at each width.
3137 multiclass avx512_binop_all<bits<8> opc, string OpcodeStr, OpndItins itins,
3138 SDNode OpNode, bit IsCommutable = 0> {
3140 defm NAME#Z : avx512_binop_rm2<opc, OpcodeStr, itins, OpNode,
3141 v16i32_info, v8i64_info, IsCommutable>,
3142 EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
3143 let Predicates = [HasVLX] in {
3144 defm NAME#Z256 : avx512_binop_rm2<opc, OpcodeStr, itins, OpNode,
3145 v8i32x_info, v4i64x_info, IsCommutable>,
3146 EVEX_V256, EVEX_CD8<64, CD8VF>, VEX_W;
3147 defm NAME#Z128 : avx512_binop_rm2<opc, OpcodeStr, itins, OpNode,
3148 v4i32x_info, v2i64x_info, IsCommutable>,
3149 EVEX_V128, EVEX_CD8<64, CD8VF>, VEX_W;
3153 defm VPMULDQ : avx512_binop_all<0x28, "vpmuldq", SSE_INTALU_ITINS_P,
3155 defm VPMULUDQ : avx512_binop_all<0xF4, "vpmuludq", SSE_INTMUL_ITINS_P,
// Broadcast-memory form for the pack instructions; the scalar is splat to
// the *source* element type (unlike avx512_binop_rm2 which uses _Dst).
3158 multiclass avx512_packs_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
3159 X86VectorVTInfo _Src, X86VectorVTInfo _Dst> {
3160 let mayLoad = 1 in {
3161 defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
3162 (ins _Src.RC:$src1, _Src.ScalarMemOp:$src2),
3164 "${src2}"##_Src.BroadcastStr##", $src1",
3165 "$src1, ${src2}"##_Src.BroadcastStr,
3166 (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1), (bitconvert
3167 (_Src.VT (X86VBroadcast
3168 (_Src.ScalarLdFrag addr:$src2)))))),
3170 EVEX_4V, EVEX_B, EVEX_CD8<_Src.EltSize, CD8VF>;
// rr and rm forms for the pack instructions (narrowing _Src -> _Dst).
3174 multiclass avx512_packs_rm<bits<8> opc, string OpcodeStr,
3175 SDNode OpNode,X86VectorVTInfo _Src,
3176 X86VectorVTInfo _Dst> {
3177 defm rr : AVX512_maskable<opc, MRMSrcReg, _Dst, (outs _Dst.RC:$dst),
3178 (ins _Src.RC:$src1, _Src.RC:$src2), OpcodeStr,
3179 "$src2, $src1","$src1, $src2",
3181 (_Src.VT _Src.RC:$src1),
3182 (_Src.VT _Src.RC:$src2))),
3183 "">, EVEX_CD8<_Src.EltSize, CD8VF>, EVEX_4V;
3184 let mayLoad = 1 in {
3185 defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
3186 (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
3187 "$src2, $src1", "$src1, $src2",
3188 (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
3189 (bitconvert (_Src.LdFrag addr:$src2)))),
3190 "">, EVEX_4V, EVEX_CD8<_Src.EltSize, CD8VF>;
// Instantiates the i32->i16 pack forms (VPACKSSDW/VPACKUSDW) at 512-bit,
// plus 256/128-bit variants when AVX512VL is available. Each width gets
// both the rm and the broadcast (rmb) forms.
// NOTE(review): several lines are missing in this extraction (e.g. the
// SDNode parameter line after 3194 and the destination-type lines after
// 3196/3201/3205); verify against the upstream file.
3194 multiclass avx512_packs_all_i32_i16<bits<8> opc, string OpcodeStr,
3196 defm NAME#Z : avx512_packs_rm<opc, OpcodeStr, OpNode, v16i32_info,
3198 avx512_packs_rmb<opc, OpcodeStr, OpNode, v16i32_info,
3199 v32i16_info>, EVEX_V512;
3200 let Predicates = [HasVLX] in {
3201 defm NAME#Z256 : avx512_packs_rm<opc, OpcodeStr, OpNode, v8i32x_info,
3203 avx512_packs_rmb<opc, OpcodeStr, OpNode, v8i32x_info,
3204 v16i16x_info>, EVEX_V256;
3205 defm NAME#Z128 : avx512_packs_rm<opc, OpcodeStr, OpNode, v4i32x_info,
3207 avx512_packs_rmb<opc, OpcodeStr, OpNode, v4i32x_info,
3208 v8i16x_info>, EVEX_V128;
// Instantiates the i16->i8 pack forms (VPACKSSWB/VPACKUSWB) at 512-bit,
// plus 256/128-bit variants under VLX. Note no rmb form here: byte/word
// packs have no embedded-broadcast encoding. All four VPACK* instructions
// are gated on HasBWI.
// NOTE(review): line numbering jumps (3211->3213, 3219->3222) — the OpNode
// parameter line and closing braces are missing from this extraction.
3211 multiclass avx512_packs_all_i16_i8<bits<8> opc, string OpcodeStr,
3213 defm NAME#Z : avx512_packs_rm<opc, OpcodeStr, OpNode, v32i16_info,
3214 v64i8_info>, EVEX_V512;
3215 let Predicates = [HasVLX] in {
3216 defm NAME#Z256 : avx512_packs_rm<opc, OpcodeStr, OpNode, v16i16x_info,
3217 v32i8x_info>, EVEX_V256;
3218 defm NAME#Z128 : avx512_packs_rm<opc, OpcodeStr, OpNode, v8i16x_info,
3219 v16i8x_info>, EVEX_V128;
3222 let Predicates = [HasBWI] in {
3223 defm VPACKSSDW : avx512_packs_all_i32_i16<0x6B, "vpackssdw", X86Packss>, PD;
3224 defm VPACKUSDW : avx512_packs_all_i32_i16<0x2b, "vpackusdw", X86Packus>, T8PD;
3225 defm VPACKSSWB : avx512_packs_all_i16_i8 <0x63, "vpacksswb", X86Packss>, AVX512BIBase, VEX_W;
3226 defm VPACKUSWB : avx512_packs_all_i16_i8 <0x67, "vpackuswb", X86Packus>, AVX512BIBase, VEX_W;
// Integer min/max instantiations over all element sizes:
//  - byte (_vl_b) and word (_vl_w) variants require BWI,
//  - dword/qword (_vl_dq) variants require only AVX512.
// All are commutable (trailing 1). The signed ops use X86smax/X86smin,
// the unsigned ops X86umax/X86umin.
3229 defm VPMAXSB : avx512_binop_rm_vl_b<0x3C, "vpmaxs", X86smax,
3230 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
3231 defm VPMAXSW : avx512_binop_rm_vl_w<0xEE, "vpmaxs", X86smax,
3232 SSE_INTALU_ITINS_P, HasBWI, 1>;
3233 defm VPMAXS : avx512_binop_rm_vl_dq<0x3D, 0x3D, "vpmaxs", X86smax,
3234 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
3236 defm VPMAXUB : avx512_binop_rm_vl_b<0xDE, "vpmaxu", X86umax,
3237 SSE_INTALU_ITINS_P, HasBWI, 1>;
3238 defm VPMAXUW : avx512_binop_rm_vl_w<0x3E, "vpmaxu", X86umax,
3239 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
3240 defm VPMAXU : avx512_binop_rm_vl_dq<0x3F, 0x3F, "vpmaxu", X86umax,
3241 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
3243 defm VPMINSB : avx512_binop_rm_vl_b<0x38, "vpmins", X86smin,
3244 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
3245 defm VPMINSW : avx512_binop_rm_vl_w<0xEA, "vpmins", X86smin,
3246 SSE_INTALU_ITINS_P, HasBWI, 1>;
3247 defm VPMINS : avx512_binop_rm_vl_dq<0x39, 0x39, "vpmins", X86smin,
3248 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
3250 defm VPMINUB : avx512_binop_rm_vl_b<0xDA, "vpminu", X86umin,
3251 SSE_INTALU_ITINS_P, HasBWI, 1>;
3252 defm VPMINUW : avx512_binop_rm_vl_w<0x3A, "vpminu", X86umin,
3253 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
3254 defm VPMINU : avx512_binop_rm_vl_dq<0x3B, 0x3B, "vpminu", X86umin,
3255 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
// Lower the unmasked form of the 512-bit min/max mask intrinsics (mask
// operand all-zeros, write-mask -1, i.e. "no masking") directly onto the
// plain rr instructions. The q-variants bitconvert the v16i32 zero vector
// to v8i64 to match the intrinsic's operand type.
3257 def : Pat <(v16i32 (int_x86_avx512_mask_pmaxs_d_512 (v16i32 VR512:$src1),
3258 (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
3259 (VPMAXSDZrr VR512:$src1, VR512:$src2)>;
3260 def : Pat <(v16i32 (int_x86_avx512_mask_pmaxu_d_512 (v16i32 VR512:$src1),
3261 (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
3262 (VPMAXUDZrr VR512:$src1, VR512:$src2)>;
3263 def : Pat <(v8i64 (int_x86_avx512_mask_pmaxs_q_512 (v8i64 VR512:$src1),
3264 (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
3265 (VPMAXSQZrr VR512:$src1, VR512:$src2)>;
3266 def : Pat <(v8i64 (int_x86_avx512_mask_pmaxu_q_512 (v8i64 VR512:$src1),
3267 (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
3268 (VPMAXUQZrr VR512:$src1, VR512:$src2)>;
3269 def : Pat <(v16i32 (int_x86_avx512_mask_pmins_d_512 (v16i32 VR512:$src1),
3270 (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
3271 (VPMINSDZrr VR512:$src1, VR512:$src2)>;
3272 def : Pat <(v16i32 (int_x86_avx512_mask_pminu_d_512 (v16i32 VR512:$src1),
3273 (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
3274 (VPMINUDZrr VR512:$src1, VR512:$src2)>;
3275 def : Pat <(v8i64 (int_x86_avx512_mask_pmins_q_512 (v8i64 VR512:$src1),
3276 (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
3277 (VPMINSQZrr VR512:$src1, VR512:$src2)>;
3278 def : Pat <(v8i64 (int_x86_avx512_mask_pminu_q_512 (v8i64 VR512:$src1),
3279 (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
3280 (VPMINUQZrr VR512:$src1, VR512:$src2)>;
3281 //===----------------------------------------------------------------------===//
3282 // AVX-512 - Unpack Instructions
3283 //===----------------------------------------------------------------------===//
// FP unpack (UNPCKH/UNPCKL) rr and rm forms. The rm pattern bitconverts
// the loaded value, so the mem_frag's element type need not match vt.
// NOTE(review): the PS instantiations pass loadv8f64 as mem_frag for a
// v16f32 result — this type-checks only because of the bitconvert;
// loadv16f32 (used elsewhere in this file) would be the more direct
// choice. Confirm intent before changing.
// NOTE(review): extraction is missing lines (3288, 3291, 3293, 3296,
// 3299-3301) — the Domain parameter, !strconcat/asm lines, itineraries,
// and closing braces are not visible here.
3285 multiclass avx512_unpack_fp<bits<8> opc, SDNode OpNode, ValueType vt,
3286 PatFrag mem_frag, RegisterClass RC,
3287 X86MemOperand x86memop, string asm,
3289 def rr : AVX512PI<opc, MRMSrcReg,
3290 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3292 (vt (OpNode RC:$src1, RC:$src2)))],
3294 def rm : AVX512PI<opc, MRMSrcMem,
3295 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3297 (vt (OpNode RC:$src1,
3298 (bitconvert (mem_frag addr:$src2)))))],
3302 defm VUNPCKHPSZ: avx512_unpack_fp<0x15, X86Unpckh, v16f32, loadv8f64,
3303 VR512, f512mem, "vunpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3304 SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
3305 defm VUNPCKHPDZ: avx512_unpack_fp<0x15, X86Unpckh, v8f64, loadv8f64,
3306 VR512, f512mem, "vunpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3307 SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
3308 defm VUNPCKLPSZ: avx512_unpack_fp<0x14, X86Unpckl, v16f32, loadv8f64,
3309 VR512, f512mem, "vunpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3310 SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
3311 defm VUNPCKLPDZ: avx512_unpack_fp<0x14, X86Unpckl, v8f64, loadv8f64,
3312 VR512, f512mem, "vunpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3313 SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// Integer unpack (PUNPCK*) rr and rm forms plus the 512-bit
// instantiations. The dword forms use CD8 scaling of 32, the qword forms
// 64 (with VEX_W to select the 64-bit element encoding).
// NOTE(review): line 3329 (closing brace of the multiclass) is missing
// from this extraction.
3315 multiclass avx512_unpack_int<bits<8> opc, string OpcodeStr, SDNode OpNode,
3316 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
3317 X86MemOperand x86memop> {
3318 def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
3319 (ins RC:$src1, RC:$src2),
3320 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3321 [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
3322 IIC_SSE_UNPCK>, EVEX_4V;
3323 def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
3324 (ins RC:$src1, x86memop:$src2),
3325 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3326 [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1),
3327 (bitconvert (memop_frag addr:$src2)))))],
3328 IIC_SSE_UNPCK>, EVEX_4V;
3330 defm VPUNPCKLDQZ : avx512_unpack_int<0x62, "vpunpckldq", X86Unpckl, v16i32,
3331 VR512, loadv16i32, i512mem>, EVEX_V512,
3332 EVEX_CD8<32, CD8VF>;
3333 defm VPUNPCKLQDQZ : avx512_unpack_int<0x6C, "vpunpcklqdq", X86Unpckl, v8i64,
3334 VR512, loadv8i64, i512mem>, EVEX_V512,
3335 VEX_W, EVEX_CD8<64, CD8VF>;
3336 defm VPUNPCKHDQZ : avx512_unpack_int<0x6A, "vpunpckhdq", X86Unpckh, v16i32,
3337 VR512, loadv16i32, i512mem>, EVEX_V512,
3338 EVEX_CD8<32, CD8VF>;
3339 defm VPUNPCKHQDQZ : avx512_unpack_int<0x6D, "vpunpckhqdq", X86Unpckh, v8i64,
3340 VR512, loadv8i64, i512mem>, EVEX_V512,
3341 VEX_W, EVEX_CD8<64, CD8VF>;
3342 //===----------------------------------------------------------------------===//
// Immediate-controlled shuffle (PSHUFD-style): register/imm8 (ri) and
// memory/imm8 (mi) forms, instantiated for 512-bit VPSHUFD.
// NOTE(review): extraction is missing lines (3353, 3355, 3360, 3363-3364)
// — the [(set RC:$dst, ...) openings, the EVEX modifier after ri, and the
// closing brace are not visible here.
3346 multiclass avx512_pshuf_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
3347 SDNode OpNode, PatFrag mem_frag,
3348 X86MemOperand x86memop, ValueType OpVT> {
3349 def ri : AVX512Ii8<opc, MRMSrcReg, (outs RC:$dst),
3350 (ins RC:$src1, u8imm:$src2),
3351 !strconcat(OpcodeStr,
3352 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3354 (OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
3356 def mi : AVX512Ii8<opc, MRMSrcMem, (outs RC:$dst),
3357 (ins x86memop:$src1, u8imm:$src2),
3358 !strconcat(OpcodeStr,
3359 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3361 (OpVT (OpNode (mem_frag addr:$src1),
3362 (i8 imm:$src2))))]>, EVEX;
3365 defm VPSHUFDZ : avx512_pshuf_imm<0x70, "vpshufd", VR512, X86PShufd, loadv16i32,
3366 i512mem, v16i32>, PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
3368 //===----------------------------------------------------------------------===//
3369 // AVX-512 Logical Instructions
3370 //===----------------------------------------------------------------------===//
// Bitwise logical ops over d/q element sizes. AND/OR/XOR are commutable;
// ANDN (X86andnp: ~a & b) is not, hence the trailing 0.
3372 defm VPAND : avx512_binop_rm_vl_dq<0xDB, 0xDB, "vpand", and,
3373 SSE_INTALU_ITINS_P, HasAVX512, 1>;
3374 defm VPOR : avx512_binop_rm_vl_dq<0xEB, 0xEB, "vpor", or,
3375 SSE_INTALU_ITINS_P, HasAVX512, 1>;
3376 defm VPXOR : avx512_binop_rm_vl_dq<0xEF, 0xEF, "vpxor", xor,
3377 SSE_INTALU_ITINS_P, HasAVX512, 1>;
3378 defm VPANDN : avx512_binop_rm_vl_dq<0xDF, 0xDF, "vpandn", X86andnp,
3379 SSE_INTALU_ITINS_P, HasAVX512, 0>;
3381 //===----------------------------------------------------------------------===//
3382 // AVX-512 FP arithmetic
3383 //===----------------------------------------------------------------------===//
// Scalar FP arithmetic: masked intrinsic forms (rr_Int/rm_Int, operating
// on full XMM vectors via VecNode with FROUND_CURRENT rounding) plus
// isCodeGenOnly scalar-register forms (rr/rm, via OpNode on _.FRC) used
// for plain IR-level fadd/fsub/etc. selection.
// NOTE(review): the rm form at line 3413 ends with itins.rr — given that
// rm_Int uses itins.rm, this looks like it should be itins.rm; confirm
// against upstream before changing.
// NOTE(review): extraction is missing lines (3386-3387, 3394, 3408,
// 3414-3416) — the IsCommutable parameter line, the rr itinerary, and the
// closing braces are not visible here.
3384 multiclass avx512_fp_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
3385 SDNode OpNode, SDNode VecNode, OpndItins itins,
3388 defm rr_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
3389 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
3390 "$src2, $src1", "$src1, $src2",
3391 (VecNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
3392 (i32 FROUND_CURRENT)),
3393 "", itins.rr, IsCommutable>;
3395 defm rm_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
3396 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
3397 "$src2, $src1", "$src1, $src2",
3398 (VecNode (_.VT _.RC:$src1),
3399 (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
3400 (i32 FROUND_CURRENT)),
3401 "", itins.rm, IsCommutable>;
3402 let isCodeGenOnly = 1, isCommutable = IsCommutable,
3403 Predicates = [HasAVX512] in {
3404 def rr : I< opc, MRMSrcReg, (outs _.FRC:$dst),
3405 (ins _.FRC:$src1, _.FRC:$src2),
3406 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3407 [(set _.FRC:$dst, (OpNode _.FRC:$src1, _.FRC:$src2))],
3409 def rm : I< opc, MRMSrcMem, (outs _.FRC:$dst),
3410 (ins _.FRC:$src1, _.ScalarMemOp:$src2),
3411 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3412 [(set _.FRC:$dst, (OpNode _.FRC:$src1,
3413 (_.ScalarLdFrag addr:$src2)))], itins.rr>;
// Scalar FP variants with explicit rounding control:
//  - avx512_fp_scalar_round: rrb form taking an AVX512RC rounding-mode
//    operand ($rc), encoded via EVEX_B + (missing here) EVEX_RC.
//  - avx512_fp_scalar_sae: rrb form with suppress-all-exceptions
//    ({sae}, FROUND_NO_EXC), encoded via EVEX_B.
// NOTE(review): extraction is missing lines (3419, 3425-3426, 3429,
// 3435-3436) — trailing modifiers and closing braces are not visible.
3417 multiclass avx512_fp_scalar_round<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
3418 SDNode VecNode, OpndItins itins, bit IsCommutable> {
3420 defm rrb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
3421 (ins _.RC:$src1, _.RC:$src2, AVX512RC:$rc), OpcodeStr,
3422 "$rc, $src2, $src1", "$src1, $src2, $rc",
3423 (VecNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
3424 (i32 imm:$rc)), "", itins.rr, IsCommutable>,
3427 multiclass avx512_fp_scalar_sae<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
3428 SDNode VecNode, OpndItins itins, bit IsCommutable> {
3430 defm rrb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
3431 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
3432 "$src2, $src1", "$src1, $src2",
3433 (VecNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
3434 (i32 FROUND_NO_EXC)), "{sae}">, EVEX_B;
// Combine the base scalar forms with rounding (round) or SAE (sae)
// variants for both SS (f32, XS prefix) and SD (f64, XD + VEX_W) types.
// CD8VT1 scales disp8 by one element (tuple-1 scalar).
// NOTE(review): extraction is missing lines (3438, 3450-3451, 3453, 3465)
// — the VecNode parameter lines and closing braces are not visible here.
3437 multiclass avx512_binop_s_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
3439 SizeItins itins, bit IsCommutable> {
3440 defm SSZ : avx512_fp_scalar<opc, OpcodeStr#"ss", f32x_info, OpNode, VecNode,
3441 itins.s, IsCommutable>,
3442 avx512_fp_scalar_round<opc, OpcodeStr#"ss", f32x_info, VecNode,
3443 itins.s, IsCommutable>,
3444 XS, EVEX_4V, VEX_LIG, EVEX_CD8<32, CD8VT1>;
3445 defm SDZ : avx512_fp_scalar<opc, OpcodeStr#"sd", f64x_info, OpNode, VecNode,
3446 itins.d, IsCommutable>,
3447 avx512_fp_scalar_round<opc, OpcodeStr#"sd", f64x_info, VecNode,
3448 itins.d, IsCommutable>,
3449 XD, VEX_W, EVEX_4V, VEX_LIG, EVEX_CD8<64, CD8VT1>;
3452 multiclass avx512_binop_s_sae<bits<8> opc, string OpcodeStr, SDNode OpNode,
3454 SizeItins itins, bit IsCommutable> {
3455 defm SSZ : avx512_fp_scalar<opc, OpcodeStr#"ss", f32x_info, OpNode, VecNode,
3456 itins.s, IsCommutable>,
3457 avx512_fp_scalar_sae<opc, OpcodeStr#"ss", f32x_info, VecNode,
3458 itins.s, IsCommutable>,
3459 XS, EVEX_4V, VEX_LIG, EVEX_CD8<32, CD8VT1>;
3460 defm SDZ : avx512_fp_scalar<opc, OpcodeStr#"sd", f64x_info, OpNode, VecNode,
3461 itins.d, IsCommutable>,
3462 avx512_fp_scalar_sae<opc, OpcodeStr#"sd", f64x_info, VecNode,
3463 itins.d, IsCommutable>,
3464 XD, VEX_W, EVEX_4V, VEX_LIG, EVEX_CD8<64, CD8VT1>;
// Scalar FP arithmetic instantiations. ADD/MUL are commutable; SUB/DIV
// are not. MIN/MAX use the SAE variant (no rounding-mode operand) since
// min/max do not round.
3466 defm VADD : avx512_binop_s_round<0x58, "vadd", fadd, X86faddRnd, SSE_ALU_ITINS_S, 1>;
3467 defm VMUL : avx512_binop_s_round<0x59, "vmul", fmul, X86fmulRnd, SSE_ALU_ITINS_S, 1>;
3468 defm VSUB : avx512_binop_s_round<0x5C, "vsub", fsub, X86fsubRnd, SSE_ALU_ITINS_S, 0>;
3469 defm VDIV : avx512_binop_s_round<0x5E, "vdiv", fdiv, X86fdivRnd, SSE_ALU_ITINS_S, 0>;
3470 defm VMIN : avx512_binop_s_sae <0x5D, "vmin", X86fmin, X86fminRnd, SSE_ALU_ITINS_S, 1>;
3471 defm VMAX : avx512_binop_s_sae <0x5F, "vmax", X86fmax, X86fmaxRnd, SSE_ALU_ITINS_S, 1>;
// Packed FP arithmetic: rr, rm and embedded-broadcast rmb forms
// (avx512_fp_packed), plus a static-rounding rb form taking an AVX512RC
// operand (avx512_fp_round_packed, EVEX_B + EVEX_RC).
// NOTE(review): extraction is missing lines (3490-3493, 3501-3502) —
// the rmb trailing modifiers and closing braces are not visible here.
3473 multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
3474 X86VectorVTInfo _, bit IsCommutable> {
3475 defm rr: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
3476 (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
3477 "$src2, $src1", "$src1, $src2",
3478 (_.VT (OpNode _.RC:$src1, _.RC:$src2))>, EVEX_4V;
3479 let mayLoad = 1 in {
3480 defm rm: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
3481 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
3482 "$src2, $src1", "$src1, $src2",
3483 (OpNode _.RC:$src1, (_.LdFrag addr:$src2))>, EVEX_4V;
3484 defm rmb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
3485 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
3486 "${src2}"##_.BroadcastStr##", $src1",
3487 "$src1, ${src2}"##_.BroadcastStr,
3488 (OpNode _.RC:$src1, (_.VT (X86VBroadcast
3489 (_.ScalarLdFrag addr:$src2))))>,
3494 multiclass avx512_fp_round_packed<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd,
3495 X86VectorVTInfo _, bit IsCommutable> {
3496 defm rb: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
3497 (ins _.RC:$src1, _.RC:$src2, AVX512RC:$rc), OpcodeStr##_.Suffix,
3498 "$rc, $src2, $src1", "$src1, $src2, $rc",
3499 (_.VT (OpNodeRnd _.RC:$src1, _.RC:$src2, (i32 imm:$rc)))>,
3500 EVEX_4V, EVEX_B, EVEX_RC;
// Expand packed FP ops over widths: 512-bit PS/PD always, 128/256-bit
// only under HasVLX. avx512_fp_binop_p_round adds the 512-bit-only
// static-rounding forms (rounding exists only at 512-bit width).
// NOTE(review): extraction is missing lines (3511, 3526-3528, 3534-3535)
// — blank separators and closing braces are not visible here.
3503 multiclass avx512_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
3504 bit IsCommutable = 0> {
3505 defm PSZ : avx512_fp_packed<opc, OpcodeStr, OpNode, v16f32_info,
3506 IsCommutable>, EVEX_V512, PS,
3507 EVEX_CD8<32, CD8VF>;
3508 defm PDZ : avx512_fp_packed<opc, OpcodeStr, OpNode, v8f64_info,
3509 IsCommutable>, EVEX_V512, PD, VEX_W,
3510 EVEX_CD8<64, CD8VF>;
3512 // Define only if AVX512VL feature is present.
3513 let Predicates = [HasVLX] in {
3514 defm PSZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, v4f32x_info,
3515 IsCommutable>, EVEX_V128, PS,
3516 EVEX_CD8<32, CD8VF>;
3517 defm PSZ256 : avx512_fp_packed<opc, OpcodeStr, OpNode, v8f32x_info,
3518 IsCommutable>, EVEX_V256, PS,
3519 EVEX_CD8<32, CD8VF>;
3520 defm PDZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, v2f64x_info,
3521 IsCommutable>, EVEX_V128, PD, VEX_W,
3522 EVEX_CD8<64, CD8VF>;
3523 defm PDZ256 : avx512_fp_packed<opc, OpcodeStr, OpNode, v4f64x_info,
3524 IsCommutable>, EVEX_V256, PD, VEX_W,
3525 EVEX_CD8<64, CD8VF>;
3529 multiclass avx512_fp_binop_p_round<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd> {
3530 defm PSZ : avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, v16f32_info, 0>,
3531 EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
3532 defm PDZ : avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, v8f64_info, 0>,
3533 EVEX_V512, PD, VEX_W,EVEX_CD8<64, CD8VF>;
// Packed FP instantiations: arithmetic ops get both normal and rounding
// forms; MIN/MAX get no rounding form; the FP logical ops (AND/ANDN/OR/
// XOR) are gated on DQI.
// NOTE(review): line 3551 (closing brace of the HasDQI let block) is
// missing from this extraction.
3536 defm VADD : avx512_fp_binop_p<0x58, "vadd", fadd, 1>,
3537 avx512_fp_binop_p_round<0x58, "vadd", X86faddRnd>;
3538 defm VMUL : avx512_fp_binop_p<0x59, "vmul", fmul, 1>,
3539 avx512_fp_binop_p_round<0x59, "vmul", X86fmulRnd>;
3540 defm VSUB : avx512_fp_binop_p<0x5C, "vsub", fsub>,
3541 avx512_fp_binop_p_round<0x5C, "vsub", X86fsubRnd>;
3542 defm VDIV : avx512_fp_binop_p<0x5E, "vdiv", fdiv>,
3543 avx512_fp_binop_p_round<0x5E, "vdiv", X86fdivRnd>;
3544 defm VMIN : avx512_fp_binop_p<0x5D, "vmin", X86fmin, 1>;
3545 defm VMAX : avx512_fp_binop_p<0x5F, "vmax", X86fmax, 1>;
3546 let Predicates = [HasDQI] in {
3547 defm VAND : avx512_fp_binop_p<0x54, "vand", X86fand, 1>;
3548 defm VANDN : avx512_fp_binop_p<0x55, "vandn", X86fandn, 0>;
3549 defm VOR : avx512_fp_binop_p<0x56, "vor", X86for, 1>;
3550 defm VXOR : avx512_fp_binop_p<0x57, "vxor", X86fxor, 1>;
// Lower the unmasked, current-rounding form of the 512-bit FP min/max
// mask intrinsics (zero mask vector, all-ones write mask, FROUND_CURRENT)
// onto the plain rr instructions.
3552 def : Pat<(v16f32 (int_x86_avx512_mask_max_ps_512 (v16f32 VR512:$src1),
3553 (v16f32 VR512:$src2), (bc_v16f32 (v16i32 immAllZerosV)),
3554 (i16 -1), FROUND_CURRENT)),
3555 (VMAXPSZrr VR512:$src1, VR512:$src2)>;
3557 def : Pat<(v8f64 (int_x86_avx512_mask_max_pd_512 (v8f64 VR512:$src1),
3558 (v8f64 VR512:$src2), (bc_v8f64 (v16i32 immAllZerosV)),
3559 (i8 -1), FROUND_CURRENT)),
3560 (VMAXPDZrr VR512:$src1, VR512:$src2)>;
3562 def : Pat<(v16f32 (int_x86_avx512_mask_min_ps_512 (v16f32 VR512:$src1),
3563 (v16f32 VR512:$src2), (bc_v16f32 (v16i32 immAllZerosV)),
3564 (i16 -1), FROUND_CURRENT)),
3565 (VMINPSZrr VR512:$src1, VR512:$src2)>;
3567 def : Pat<(v8f64 (int_x86_avx512_mask_min_pd_512 (v8f64 VR512:$src1),
3568 (v8f64 VR512:$src2), (bc_v8f64 (v16i32 immAllZerosV)),
3569 (i8 -1), FROUND_CURRENT)),
3570 (VMINPDZrr VR512:$src1, VR512:$src2)>;
3571 //===----------------------------------------------------------------------===//
3572 // AVX-512 VPTESTM instructions
3573 //===----------------------------------------------------------------------===//
// VPTESTM/VPTESTNM building blocks: the result is a mask register
// (_.KRC), produced by OpNode over the two vector operands. rr, rm
// (avx512_vptest) and embedded-broadcast rmb (avx512_vptest_mb) forms.
// NOTE(review): extraction is missing lines (3581-3582, 3588, 3590-3591,
// 3594, 3602) — trailing modifiers and closing braces are not visible.
3575 multiclass avx512_vptest<bits<8> opc, string OpcodeStr, SDNode OpNode,
3576 X86VectorVTInfo _> {
3577 defm rr : AVX512_maskable_cmp<opc, MRMSrcReg, _, (outs _.KRC:$dst),
3578 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
3579 "$src2, $src1", "$src1, $src2",
3580 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))>,
3583 defm rm : AVX512_maskable_cmp<opc, MRMSrcMem, _, (outs _.KRC:$dst),
3584 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
3585 "$src2, $src1", "$src1, $src2",
3586 (OpNode (_.VT _.RC:$src1),
3587 (_.VT (bitconvert (_.LdFrag addr:$src2))))>,
3589 EVEX_CD8<_.EltSize, CD8VF>;
3592 multiclass avx512_vptest_mb<bits<8> opc, string OpcodeStr, SDNode OpNode,
3593 X86VectorVTInfo _> {
3595 defm rmb : AVX512_maskable_cmp<opc, MRMSrcMem, _, (outs _.KRC:$dst),
3596 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
3597 "${src2}"##_.BroadcastStr##", $src1",
3598 "$src1, ${src2}"##_.BroadcastStr,
3599 (OpNode (_.VT _.RC:$src1), (_.VT (X86VBroadcast
3600 (_.ScalarLdFrag addr:$src2))))>,
3601 EVEX_B, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
// Width/type expansion for VPTEST{N}M:
//  - _dq_sizes: 512-bit always, 256/128-bit under VLX; d/q get the
//    broadcast form too.
//  - _wb: word/byte variants under BWI (no broadcast form for w/b).
//  - _all_forms: glues the wb and dq families together under one name.
// NOTE(review): extraction is missing lines (3608, 3614-3616, 3619,
// 3622-3623, 3625, 3628, 3630-3631, 3633, 3635, 3637, 3639, 3641-3644,
// 3646, 3649) — EVEX width modifiers, OpNode parameter lines, and closing
// braces are not visible here; verify upstream.
3603 multiclass avx512_vptest_dq_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
3604 AVX512VLVectorVTInfo _> {
3605 let Predicates = [HasAVX512] in
3606 defm Z : avx512_vptest<opc, OpcodeStr, OpNode, _.info512>,
3607 avx512_vptest_mb<opc, OpcodeStr, OpNode, _.info512>, EVEX_V512;
3609 let Predicates = [HasAVX512, HasVLX] in {
3610 defm Z256 : avx512_vptest<opc, OpcodeStr, OpNode, _.info256>,
3611 avx512_vptest_mb<opc, OpcodeStr, OpNode, _.info256>, EVEX_V256;
3612 defm Z128 : avx512_vptest<opc, OpcodeStr, OpNode, _.info128>,
3613 avx512_vptest_mb<opc, OpcodeStr, OpNode, _.info128>, EVEX_V128;
3617 multiclass avx512_vptest_dq<bits<8> opc, string OpcodeStr, SDNode OpNode> {
3618 defm D : avx512_vptest_dq_sizes<opc, OpcodeStr#"d", OpNode,
3620 defm Q : avx512_vptest_dq_sizes<opc, OpcodeStr#"q", OpNode,
3621 avx512vl_i64_info>, VEX_W;
3624 multiclass avx512_vptest_wb<bits<8> opc, string OpcodeStr,
3626 let Predicates = [HasBWI] in {
3627 defm WZ: avx512_vptest<opc, OpcodeStr#"w", OpNode, v32i16_info>,
3629 defm BZ: avx512_vptest<opc, OpcodeStr#"b", OpNode, v64i8_info>,
3632 let Predicates = [HasVLX, HasBWI] in {
3634 defm WZ256: avx512_vptest<opc, OpcodeStr#"w", OpNode, v16i16x_info>,
3636 defm WZ128: avx512_vptest<opc, OpcodeStr#"w", OpNode, v8i16x_info>,
3638 defm BZ256: avx512_vptest<opc, OpcodeStr#"b", OpNode, v32i8x_info>,
3640 defm BZ128: avx512_vptest<opc, OpcodeStr#"b", OpNode, v16i8x_info>,
3645 multiclass avx512_vptest_all_forms<bits<8> opc_wb, bits<8> opc_dq, string OpcodeStr,
3647 avx512_vptest_wb <opc_wb, OpcodeStr, OpNode>,
3648 avx512_vptest_dq<opc_dq, OpcodeStr, OpNode>;
// VPTESTM (and-then-test) vs VPTESTNM (and-then-test-for-zero) share the
// same opcodes (w/b: 0x26, d/q: 0x27) and are distinguished only by the
// mandatory prefix: T8PD vs T8XS. The Pat defs below lower the unmasked
// 512-bit ptestm intrinsics onto the rr instructions, copying the mask
// result into a GPR class.
3650 defm VPTESTM : avx512_vptest_all_forms<0x26, 0x27, "vptestm", X86testm>, T8PD;
3651 defm VPTESTNM : avx512_vptest_all_forms<0x26, 0x27, "vptestnm", X86testnm>, T8XS;
3653 def : Pat <(i16 (int_x86_avx512_mask_ptestm_d_512 (v16i32 VR512:$src1),
3654 (v16i32 VR512:$src2), (i16 -1))),
3655 (COPY_TO_REGCLASS (VPTESTMDZrr VR512:$src1, VR512:$src2), GR16)>;
3657 def : Pat <(i8 (int_x86_avx512_mask_ptestm_q_512 (v8i64 VR512:$src1),
3658 (v8i64 VR512:$src2), (i8 -1))),
3659 (COPY_TO_REGCLASS (VPTESTMQZrr VR512:$src1, VR512:$src2), GR8)>;
3661 //===----------------------------------------------------------------------===//
3662 // AVX-512 Shift instructions
3663 //===----------------------------------------------------------------------===//
// Shift building blocks:
//  - avx512_shift_rmi: shift-by-immediate, register (ri) and memory (mi)
//    forms.
//  - avx512_shift_rmbi: shift-by-immediate with embedded broadcast of the
//    memory operand (EVEX_B).
//  - avx512_shift_rrm: shift-by-count, where the count ($src2) is always
//    the low element of a 128-bit register/memory operand.
// NOTE(review): extraction is missing lines (3671, 3676, 3678-3679, 3682,
// 3688-3689, 3703-3705) — the mi imm line and closing braces are not
// visible here.
3664 multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
3665 string OpcodeStr, SDNode OpNode, X86VectorVTInfo _> {
3666 defm ri : AVX512_maskable<opc, ImmFormR, _, (outs _.RC:$dst),
3667 (ins _.RC:$src1, u8imm:$src2), OpcodeStr,
3668 "$src2, $src1", "$src1, $src2",
3669 (_.VT (OpNode _.RC:$src1, (i8 imm:$src2))),
3670 " ", SSE_INTSHIFT_ITINS_P.rr>, AVX512BIi8Base, EVEX_4V;
3672 defm mi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst),
3673 (ins _.MemOp:$src1, u8imm:$src2), OpcodeStr,
3674 "$src2, $src1", "$src1, $src2",
3675 (_.VT (OpNode (_.VT (bitconvert (_.LdFrag addr:$src1))),
3677 " ", SSE_INTSHIFT_ITINS_P.rm>, AVX512BIi8Base, EVEX_4V;
3680 multiclass avx512_shift_rmbi<bits<8> opc, Format ImmFormM,
3681 string OpcodeStr, SDNode OpNode, X86VectorVTInfo _> {
3683 defm mbi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst),
3684 (ins _.ScalarMemOp:$src1, u8imm:$src2), OpcodeStr,
3685 "$src2, ${src1}"##_.BroadcastStr, "${src1}"##_.BroadcastStr##", $src2",
3686 (_.VT (OpNode (X86VBroadcast (_.ScalarLdFrag addr:$src1)), (i8 imm:$src2))),
3687 " ", SSE_INTSHIFT_ITINS_P.rm>, AVX512BIi8Base, EVEX_4V, EVEX_B;
3690 multiclass avx512_shift_rrm<bits<8> opc, string OpcodeStr, SDNode OpNode,
3691 ValueType SrcVT, PatFrag bc_frag, X86VectorVTInfo _> {
3692 // src2 is always 128-bit
3693 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
3694 (ins _.RC:$src1, VR128X:$src2), OpcodeStr,
3695 "$src2, $src1", "$src1, $src2",
3696 (_.VT (OpNode _.RC:$src1, (SrcVT VR128X:$src2))),
3697 " ", SSE_INTSHIFT_ITINS_P.rr>, AVX512BIBase, EVEX_4V;
3698 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
3699 (ins _.RC:$src1, i128mem:$src2), OpcodeStr,
3700 "$src2, $src1", "$src1, $src2",
3701 (_.VT (OpNode _.RC:$src1, (bc_frag (loadv2i64 addr:$src2)))),
3702 " ", SSE_INTSHIFT_ITINS_P.rm>, AVX512BIBase,
// Shift-by-count width expansion. Note the CD8 tuple differs per width
// (CD8VQ at 512, CD8VH at 256, CD8VF at 128) because only the low 128
// bits of the count operand are significant. avx512_shift_types then
// instantiates d/q (AVX512) and w (BWI) element sizes.
// NOTE(review): extraction is missing lines (3720-3722, 3731-3732) —
// closing braces are not visible here.
3706 multiclass avx512_shift_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
3707 ValueType SrcVT, PatFrag bc_frag,
3708 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
3709 let Predicates = [prd] in
3710 defm Z : avx512_shift_rrm<opc, OpcodeStr, OpNode, SrcVT, bc_frag,
3711 VTInfo.info512>, EVEX_V512,
3712 EVEX_CD8<VTInfo.info512.EltSize, CD8VQ> ;
3713 let Predicates = [prd, HasVLX] in {
3714 defm Z256 : avx512_shift_rrm<opc, OpcodeStr, OpNode, SrcVT, bc_frag,
3715 VTInfo.info256>, EVEX_V256,
3716 EVEX_CD8<VTInfo.info256.EltSize, CD8VH>;
3717 defm Z128 : avx512_shift_rrm<opc, OpcodeStr, OpNode, SrcVT, bc_frag,
3718 VTInfo.info128>, EVEX_V128,
3719 EVEX_CD8<VTInfo.info128.EltSize, CD8VF>;
3723 multiclass avx512_shift_types<bits<8> opcd, bits<8> opcq, bits<8> opcw,
3724 string OpcodeStr, SDNode OpNode> {
3725 defm D : avx512_shift_sizes<opcd, OpcodeStr#"d", OpNode, v4i32, bc_v4i32,
3726 avx512vl_i32_info, HasAVX512>;
3727 defm Q : avx512_shift_sizes<opcq, OpcodeStr#"q", OpNode, v2i64, bc_v2i64,
3728 avx512vl_i64_info, HasAVX512>, VEX_W;
3729 defm W : avx512_shift_sizes<opcw, OpcodeStr#"w", OpNode, v8i16, bc_v8i16,
3730 avx512vl_i16_info, HasBWI>;
// Shift-by-immediate width/type expansion:
//  - _rmi_sizes: 512-bit always, 256/128-bit under VLX; d/q widths also
//    get the broadcast (_rmbi) form.
//  - _rmi_w: word variants under BWI (no broadcast form for words).
//  - _rmi_dq: instantiates the d and q families with their CD8 scaling.
// NOTE(review): extraction is missing lines (3738, 3743, 3747, 3750-3752,
// 3764-3766, 3774-3775) — the VTInfo.info* arguments after several defm
// lines and closing braces are not visible here.
3733 multiclass avx512_shift_rmi_sizes<bits<8> opc, Format ImmFormR, Format ImmFormM,
3734 string OpcodeStr, SDNode OpNode,
3735 AVX512VLVectorVTInfo VTInfo> {
3736 let Predicates = [HasAVX512] in
3737 defm Z: avx512_shift_rmi<opc, ImmFormR, ImmFormM, OpcodeStr, OpNode,
3739 avx512_shift_rmbi<opc, ImmFormM, OpcodeStr, OpNode,
3740 VTInfo.info512>, EVEX_V512;
3741 let Predicates = [HasAVX512, HasVLX] in {
3742 defm Z256: avx512_shift_rmi<opc, ImmFormR, ImmFormM, OpcodeStr, OpNode,
3744 avx512_shift_rmbi<opc, ImmFormM, OpcodeStr, OpNode,
3745 VTInfo.info256>, EVEX_V256;
3746 defm Z128: avx512_shift_rmi<opc, ImmFormR, ImmFormM, OpcodeStr, OpNode,
3748 avx512_shift_rmbi<opc, ImmFormM, OpcodeStr, OpNode,
3749 VTInfo.info128>, EVEX_V128;
3753 multiclass avx512_shift_rmi_w<bits<8> opcw,
3754 Format ImmFormR, Format ImmFormM,
3755 string OpcodeStr, SDNode OpNode> {
3756 let Predicates = [HasBWI] in
3757 defm WZ: avx512_shift_rmi<opcw, ImmFormR, ImmFormM, OpcodeStr, OpNode,
3758 v32i16_info>, EVEX_V512;
3759 let Predicates = [HasVLX, HasBWI] in {
3760 defm WZ256: avx512_shift_rmi<opcw, ImmFormR, ImmFormM, OpcodeStr, OpNode,
3761 v16i16x_info>, EVEX_V256;
3762 defm WZ128: avx512_shift_rmi<opcw, ImmFormR, ImmFormM, OpcodeStr, OpNode,
3763 v8i16x_info>, EVEX_V128;
3767 multiclass avx512_shift_rmi_dq<bits<8> opcd, bits<8> opcq,
3768 Format ImmFormR, Format ImmFormM,
3769 string OpcodeStr, SDNode OpNode> {
3770 defm D: avx512_shift_rmi_sizes<opcd, ImmFormR, ImmFormM, OpcodeStr#"d", OpNode,
3771 avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
3772 defm Q: avx512_shift_rmi_sizes<opcq, ImmFormR, ImmFormM, OpcodeStr#"q", OpNode,
3773 avx512vl_i64_info>, EVEX_CD8<64, CD8VF>, VEX_W;
// Shift instruction instantiations. The immediate d/q forms share opcodes
// 0x72/0x73 and are distinguished by the ModRM reg field (MRM2/4/6 for
// srl/sra/sll; MRM0/1 for ror/rol). The by-count forms use distinct
// opcodes per element size.
3776 defm VPSRL : avx512_shift_rmi_dq<0x72, 0x73, MRM2r, MRM2m, "vpsrl", X86vsrli>,
3777 avx512_shift_rmi_w<0x71, MRM2r, MRM2m, "vpsrlw", X86vsrli>;
3779 defm VPSLL : avx512_shift_rmi_dq<0x72, 0x73, MRM6r, MRM6m, "vpsll", X86vshli>,
3780 avx512_shift_rmi_w<0x71, MRM6r, MRM6m, "vpsllw", X86vshli>;
3782 defm VPSRA : avx512_shift_rmi_dq<0x72, 0x73, MRM4r, MRM4m, "vpsra", X86vsrai>,
3783 avx512_shift_rmi_w<0x71, MRM4r, MRM4m, "vpsraw", X86vsrai>;
3785 defm VPROR : avx512_shift_rmi_dq<0x72, 0x72, MRM0r, MRM0m, "vpror", rotr>;
3786 defm VPROL : avx512_shift_rmi_dq<0x72, 0x72, MRM1r, MRM1m, "vprol", rotl>;
3788 defm VPSLL : avx512_shift_types<0xF2, 0xF3, 0xF1, "vpsll", X86vshl>;
3789 defm VPSRA : avx512_shift_types<0xE2, 0xE2, 0xE1, "vpsra", X86vsra>;
3790 defm VPSRL : avx512_shift_types<0xD2, 0xD3, 0xD1, "vpsrl", X86vsrl>;
3792 //===-------------------------------------------------------------------===//
3793 // Variable Bit Shifts
3794 //===-------------------------------------------------------------------===//
// Variable (per-element) shifts: rr and rm forms (avx512_var_shift) plus
// the embedded-broadcast rmb form (avx512_var_shift_mb, EVEX_B).
// NOTE(review): extraction is missing lines (3802, 3809-3810, 3813, 3822)
// — blank separators and closing braces are not visible here.
3795 multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
3796 X86VectorVTInfo _> {
3797 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
3798 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
3799 "$src2, $src1", "$src1, $src2",
3800 (_.VT (OpNode _.RC:$src1, (_.VT _.RC:$src2))),
3801 " ", SSE_INTSHIFT_ITINS_P.rr>, AVX5128IBase, EVEX_4V;
3803 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
3804 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
3805 "$src2, $src1", "$src1, $src2",
3806 (_.VT (OpNode _.RC:$src1, (_.LdFrag addr:$src2))),
3807 " ", SSE_INTSHIFT_ITINS_P.rm>, AVX5128IBase, EVEX_4V,
3808 EVEX_CD8<_.EltSize, CD8VF>;
3811 multiclass avx512_var_shift_mb<bits<8> opc, string OpcodeStr, SDNode OpNode,
3812 X86VectorVTInfo _> {
3814 defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
3815 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
3816 "${src2}"##_.BroadcastStr##", $src1",
3817 "$src1, ${src2}"##_.BroadcastStr,
3818 (_.VT (OpNode _.RC:$src1, (_.VT (X86VBroadcast
3819 (_.ScalarLdFrag addr:$src2))))),
3820 " ", SSE_INTSHIFT_ITINS_P.rm>, AVX5128IBase, EVEX_B,
3821 EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
// Variable-shift width/type expansion, mirroring the vptest layout:
// _sizes handles 512 + VLX widths (d/q with broadcast), _types
// instantiates d and q, and _w adds word variants under BWI.
// NOTE(review): extraction is missing lines (3828, 3834-3836, 3838, 3840,
// 3843-3844, 3846, 3849, 3851, 3853, 3855-3858) — OpNode parameter lines,
// EVEX width modifiers, and closing braces are not visible here.
3823 multiclass avx512_var_shift_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
3824 AVX512VLVectorVTInfo _> {
3825 let Predicates = [HasAVX512] in
3826 defm Z : avx512_var_shift<opc, OpcodeStr, OpNode, _.info512>,
3827 avx512_var_shift_mb<opc, OpcodeStr, OpNode, _.info512>, EVEX_V512;
3829 let Predicates = [HasAVX512, HasVLX] in {
3830 defm Z256 : avx512_var_shift<opc, OpcodeStr, OpNode, _.info256>,
3831 avx512_var_shift_mb<opc, OpcodeStr, OpNode, _.info256>, EVEX_V256;
3832 defm Z128 : avx512_var_shift<opc, OpcodeStr, OpNode, _.info128>,
3833 avx512_var_shift_mb<opc, OpcodeStr, OpNode, _.info128>, EVEX_V128;
3837 multiclass avx512_var_shift_types<bits<8> opc, string OpcodeStr,
3839 defm D : avx512_var_shift_sizes<opc, OpcodeStr#"d", OpNode,
3841 defm Q : avx512_var_shift_sizes<opc, OpcodeStr#"q", OpNode,
3842 avx512vl_i64_info>, VEX_W;
3845 multiclass avx512_var_shift_w<bits<8> opc, string OpcodeStr,
3847 let Predicates = [HasBWI] in
3848 defm WZ: avx512_var_shift<opc, OpcodeStr, OpNode, v32i16_info>,
3850 let Predicates = [HasVLX, HasBWI] in {
3852 defm WZ256: avx512_var_shift<opc, OpcodeStr, OpNode, v16i16x_info>,
3854 defm WZ128: avx512_var_shift<opc, OpcodeStr, OpNode, v8i16x_info>,
// Variable-shift instantiations: d/q via _types, word via _w (BWI).
// Variable rotates (VPRORV/VPROLV) have no word form.
3859 defm VPSLLV : avx512_var_shift_types<0x47, "vpsllv", shl>,
3860 avx512_var_shift_w<0x12, "vpsllvw", shl>;
3861 defm VPSRAV : avx512_var_shift_types<0x46, "vpsrav", sra>,
3862 avx512_var_shift_w<0x11, "vpsravw", sra>;
3863 defm VPSRLV : avx512_var_shift_types<0x45, "vpsrlv", srl>,
3864 avx512_var_shift_w<0x10, "vpsrlvw", srl>;
3865 defm VPRORV : avx512_var_shift_types<0x14, "vprorv", rotr>;
3866 defm VPROLV : avx512_var_shift_types<0x15, "vprolv", rotl>;
3868 //===----------------------------------------------------------------------===//
3869 // AVX-512 - MOVDDUP
3870 //===----------------------------------------------------------------------===//
// MOVDDUP (duplicate even-indexed f64 elements), rr and rm forms, with
// the 512-bit instantiation and a Pat folding a scalar f64 load that is
// scalar_to_vector'ed into the memory form.
// NOTE(review): extraction is missing lines (3879, 3881-3882) — the
// [(set RC:$dst, ...) opening of rm and the closing brace are not visible.
3872 multiclass avx512_movddup<string OpcodeStr, RegisterClass RC, ValueType VT,
3873 X86MemOperand x86memop, PatFrag memop_frag> {
3874 def rr : AVX512PDI<0x12, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
3875 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3876 [(set RC:$dst, (VT (X86Movddup RC:$src)))]>, EVEX;
3877 def rm : AVX512PDI<0x12, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
3878 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3880 (VT (X86Movddup (memop_frag addr:$src))))]>, EVEX;
3883 defm VMOVDDUPZ : avx512_movddup<"vmovddup", VR512, v8f64, f512mem, loadv8f64>,
3884 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
3885 def : Pat<(X86Movddup (v8f64 (scalar_to_vector (loadf64 addr:$src)))),
3886 (VMOVDDUPZrm addr:$src)>;
3888 //===---------------------------------------------------------------------===//
3889 // Replicate Single FP - MOVSHDUP and MOVSLDUP
3890 //===---------------------------------------------------------------------===//
// MOVSHDUP/MOVSLDUP (replicate odd-/even-indexed f32 elements): rr and rm
// forms, 512-bit instantiations, and integer-typed (v16i32) Pat aliases
// so the same shuffles select on integer vectors.
// NOTE(review): extraction is missing lines (3897, 3901-3902) — a blank
// separator and the closing brace are not visible here.
3891 multiclass avx512_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
3892 ValueType vt, RegisterClass RC, PatFrag mem_frag,
3893 X86MemOperand x86memop> {
3894 def rr : AVX512XSI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
3895 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3896 [(set RC:$dst, (vt (OpNode RC:$src)))]>, EVEX;
3898 def rm : AVX512XSI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
3899 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3900 [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>, EVEX;
3903 defm VMOVSHDUPZ : avx512_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
3904 v16f32, VR512, loadv16f32, f512mem>, EVEX_V512,
3905 EVEX_CD8<32, CD8VF>;
3906 defm VMOVSLDUPZ : avx512_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
3907 v16f32, VR512, loadv16f32, f512mem>, EVEX_V512,
3908 EVEX_CD8<32, CD8VF>;
3910 def : Pat<(v16i32 (X86Movshdup VR512:$src)), (VMOVSHDUPZrr VR512:$src)>;
3911 def : Pat<(v16i32 (X86Movshdup (loadv16i32 addr:$src))),
3912 (VMOVSHDUPZrm addr:$src)>;
3913 def : Pat<(v16i32 (X86Movsldup VR512:$src)), (VMOVSLDUPZrr VR512:$src)>;
3914 def : Pat<(v16i32 (X86Movsldup (loadv16i32 addr:$src))),
3915 (VMOVSLDUPZrm addr:$src)>;
3917 //===----------------------------------------------------------------------===//
3918 // Move Low to High and High to Low packed FP Instructions
3919 //===----------------------------------------------------------------------===//
// MOVLHPS/MOVHLPS (128-bit, EVEX-encoded), plus integer-typed Pat
// aliases under HasAVX512.
// NOTE(review): extraction is missing lines (3930, 3932, 3937-3938,
// 3941-3942) — comments inside the let block and its closing brace are
// not visible here.
3920 def VMOVLHPSZrr : AVX512PSI<0x16, MRMSrcReg, (outs VR128X:$dst),
3921 (ins VR128X:$src1, VR128X:$src2),
3922 "vmovlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3923 [(set VR128X:$dst, (v4f32 (X86Movlhps VR128X:$src1, VR128X:$src2)))],
3924 IIC_SSE_MOV_LH>, EVEX_4V;
3925 def VMOVHLPSZrr : AVX512PSI<0x12, MRMSrcReg, (outs VR128X:$dst),
3926 (ins VR128X:$src1, VR128X:$src2),
3927 "vmovhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3928 [(set VR128X:$dst, (v4f32 (X86Movhlps VR128X:$src1, VR128X:$src2)))],
3929 IIC_SSE_MOV_LH>, EVEX_4V;
3931 let Predicates = [HasAVX512] in {
3933 def : Pat<(v4i32 (X86Movlhps VR128X:$src1, VR128X:$src2)),
3934 (VMOVLHPSZrr VR128X:$src1, VR128X:$src2)>;
3935 def : Pat<(v2i64 (X86Movlhps VR128X:$src1, VR128X:$src2)),
3936 (VMOVLHPSZrr (v2i64 VR128X:$src1), VR128X:$src2)>;
3939 def : Pat<(v4i32 (X86Movhlps VR128X:$src1, VR128X:$src2)),
3940 (VMOVHLPSZrr VR128X:$src1, VR128X:$src2)>;
3943 //===----------------------------------------------------------------------===//
3944 // FMA - Fused Multiply Operations
// Packed FMA3 building block: reg/reg (r), reg/mem (m) and broadcast (mb)
// forms. $src1 is tied to $dst by the enclosing Constraints, matching the
// FMA3 three-operand destructive encoding.
3947 let Constraints = "$src1 = $dst" in {
3948 // Omitting the parameter OpNode (= null_frag) disables ISel pattern matching.
3949 multiclass avx512_fma3p_rm<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
3950 SDPatternOperator OpNode = null_frag> {
3951 defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
3952 (ins _.RC:$src2, _.RC:$src3),
3953 OpcodeStr, "$src3, $src2", "$src2, $src3",
3954 (_.VT (OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3))>,
3958 defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
3959 (ins _.RC:$src2, _.MemOp:$src3),
3960 OpcodeStr, "$src3, $src2", "$src2, $src3",
3961 (_.VT (OpNode _.RC:$src1, _.RC:$src2, (_.LdFrag addr:$src3)))>,
// Broadcast form: $src3 is a scalar memory operand splatted to the vector.
3964 defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
3965 (ins _.RC:$src2, _.ScalarMemOp:$src3),
3966 OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
3967 !strconcat("$src2, ${src3}", _.BroadcastStr ),
3969 _.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))>,
3970 AVX512FMA3Base, EVEX_B;
3972 } // Constraints = "$src1 = $dst"
// Register-register FMA form with explicit static rounding control: the
// extra AVX512RC:$rc operand selects the rounding mode (EVEX_B + EVEX_RC).
3974 let Constraints = "$src1 = $dst" in {
3975 // Omitting the parameter OpNode (= null_frag) disables ISel pattern matching.
3976 multiclass avx512_fma3_round_rrb<bits<8> opc, string OpcodeStr,
3978 SDPatternOperator OpNode> {
3979 defm rb: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
3980 (ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc),
3981 OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc",
3982 (_.VT ( OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3, (i32 imm:$rc)))>,
3983 AVX512FMA3Base, EVEX_B, EVEX_RC;
3985 } // Constraints = "$src1 = $dst"
// Only the 213 opcode form gets a rounding-control variant.
3987 multiclass avx512_fma3_round_forms<bits<8> opc213, string OpcodeStr,
3988 X86VectorVTInfo VTI, SDPatternOperator OpNode> {
3989 defm v213r : avx512_fma3_round_rrb<opc213, !strconcat(OpcodeStr, "213", VTI.Suffix),
3990 VTI, OpNode>, EVEX_CD8<VTI.EltSize, CD8VF>;
// Instantiate the 213 and 231 opcode forms of a packed FMA. The 231 form is
// instantiated without OpNode (null_frag default), i.e. assembler-only; ISel
// selects the 213 form and the 231 form is produced by later optimization.
3993 multiclass avx512_fma3p_forms<bits<8> opc213, bits<8> opc231,
3994 string OpcodeStr, X86VectorVTInfo VTI,
3995 SDPatternOperator OpNode> {
3996 defm v213r : avx512_fma3p_rm<opc213, !strconcat(OpcodeStr, "213", VTI.Suffix),
3997 VTI, OpNode>, EVEX_CD8<VTI.EltSize, CD8VF>;
3998 defm v231r : avx512_fma3p_rm<opc231, !strconcat(OpcodeStr, "231", VTI.Suffix),
3999 VTI>, EVEX_CD8<VTI.EltSize, CD8VF>;
// Top-level packed-FMA instantiator: PS and PD flavors at 512/256/128 bits.
// The rounding-control (round_forms) variant exists only at 512 bits,
// matching the EVEX embedded-rounding restriction to full-width vectors.
4002 multiclass avx512_fma3p<bits<8> opc213, bits<8> opc231,
4004 SDPatternOperator OpNode,
4005 SDPatternOperator OpNodeRnd> {
4006 let ExeDomain = SSEPackedSingle in {
4007 defm NAME##PSZ : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
4008 v16f32_info, OpNode>,
4009 avx512_fma3_round_forms<opc213, OpcodeStr,
4010 v16f32_info, OpNodeRnd>, EVEX_V512;
4011 defm NAME##PSZ256 : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
4012 v8f32x_info, OpNode>, EVEX_V256;
4013 defm NAME##PSZ128 : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
4014 v4f32x_info, OpNode>, EVEX_V128;
4016 let ExeDomain = SSEPackedDouble in {
4017 defm NAME##PDZ : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
4018 v8f64_info, OpNode>,
4019 avx512_fma3_round_forms<opc213, OpcodeStr, v8f64_info,
4020 OpNodeRnd>, EVEX_V512, VEX_W;
4021 defm NAME##PDZ256 : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
4022 v4f64x_info, OpNode>,
4024 defm NAME##PDZ128 : avx512_fma3p_forms<opc213, opc231, OpcodeStr,
4025 v2f64x_info, OpNode>,
// The six packed FMA families: 213 opcode in 0xA6-0xAE, 231 in 0xB6-0xBE.
4030 defm VFMADD : avx512_fma3p<0xA8, 0xB8, "vfmadd", X86Fmadd, X86FmaddRnd>;
4031 defm VFMSUB : avx512_fma3p<0xAA, 0xBA, "vfmsub", X86Fmsub, X86FmsubRnd>;
4032 defm VFMADDSUB : avx512_fma3p<0xA6, 0xB6, "vfmaddsub", X86Fmaddsub, X86FmaddsubRnd>;
4033 defm VFMSUBADD : avx512_fma3p<0xA7, 0xB7, "vfmsubadd", X86Fmsubadd, X86FmsubaddRnd>;
4034 defm VFNMADD : avx512_fma3p<0xAC, 0xBC, "vfnmadd", X86Fnmadd, X86FnmaddRnd>;
4035 defm VFNMSUB : avx512_fma3p<0xAE, 0xBE, "vfnmsub", X86Fnmsub, X86FnmsubRnd>;
// 132-form packed FMA: memory-only forms (m and broadcast mb). Note the
// operand order — the memory operand is $src2, multiplied against $src1,
// with $src3 as the addend, matching the 132 encoding's operand roles.
4037 let Constraints = "$src1 = $dst" in {
4038 multiclass avx512_fma3p_m132<bits<8> opc, string OpcodeStr, SDNode OpNode,
4039 X86VectorVTInfo _> {
4041 def m: AVX512FMA3<opc, MRMSrcMem, (outs _.RC:$dst),
4042 (ins _.RC:$src1, _.RC:$src3, _.MemOp:$src2),
4043 !strconcat(OpcodeStr, "\t{$src2, $src3, $dst|$dst, $src3, $src2}"),
4044 [(set _.RC:$dst, (_.VT (OpNode _.RC:$src1, (_.LdFrag addr:$src2),
// Broadcast form: splat a scalar memory operand into $src2.
4046 def mb: AVX512FMA3<opc, MRMSrcMem, (outs _.RC:$dst),
4047 (ins _.RC:$src1, _.RC:$src3, _.ScalarMemOp:$src2),
4048 !strconcat(OpcodeStr, "\t{${src2}", _.BroadcastStr,
4049 ", $src3, $dst|$dst, $src3, ${src2}", _.BroadcastStr, "}"),
4051 (OpNode _.RC:$src1, (_.VT (X86VBroadcast
4052 (_.ScalarLdFrag addr:$src2))),
4053 _.RC:$src3))]>, EVEX_B;
4055 } // Constraints = "$src1 = $dst"
// Instantiate the 132-form packed FMA memory variants for PS and PD at
// 512/256/128 bits.
//
// FIX: the three PD (double-precision, VEX_W) instantiations previously used
// EVEX_CD8<32, CD8VF>. The EVEX disp8*N compression scale is derived from
// the element size, so full-vector double-precision forms must use
// EVEX_CD8<64, CD8VF> — consistent with every other PD instantiation in this
// file (e.g. VCVTTPD2DQZ, VCVTPD2UDQZ). With the wrong scale, compressed
// displacements on the memory forms encode/decode at half the correct
// granularity.
4057 multiclass avx512_fma3p_m132_f<bits<8> opc, string OpcodeStr, SDNode OpNode> {
4059 let ExeDomain = SSEPackedSingle in {
4060 defm NAME##PSZ : avx512_fma3p_m132<opc, OpcodeStr##ps,
4061 OpNode,v16f32_info>, EVEX_V512,
4062 EVEX_CD8<32, CD8VF>;
4063 defm NAME##PSZ256 : avx512_fma3p_m132<opc, OpcodeStr##ps,
4064 OpNode, v8f32x_info>, EVEX_V256,
4065 EVEX_CD8<32, CD8VF>;
4066 defm NAME##PSZ128 : avx512_fma3p_m132<opc, OpcodeStr##ps,
4067 OpNode, v4f32x_info>, EVEX_V128,
4068 EVEX_CD8<32, CD8VF>;
4070 let ExeDomain = SSEPackedDouble in {
4071 defm NAME##PDZ : avx512_fma3p_m132<opc, OpcodeStr##pd,
4072 OpNode, v8f64_info>, EVEX_V512,
4073 VEX_W, EVEX_CD8<64, CD8VF>;
4074 defm NAME##PDZ256 : avx512_fma3p_m132<opc, OpcodeStr##pd,
4075 OpNode, v4f64x_info>, EVEX_V256,
4076 VEX_W, EVEX_CD8<64, CD8VF>;
4077 defm NAME##PDZ128 : avx512_fma3p_m132<opc, OpcodeStr##pd,
4078 OpNode, v2f64x_info>, EVEX_V128,
4079 VEX_W, EVEX_CD8<64, CD8VF>;
// 132-form instantiations: opcodes 0x96-0x9E in the FMA3 map.
4083 defm VFMADD132 : avx512_fma3p_m132_f<0x98, "vfmadd132", X86Fmadd>;
4084 defm VFMSUB132 : avx512_fma3p_m132_f<0x9A, "vfmsub132", X86Fmsub>;
4085 defm VFMADDSUB132 : avx512_fma3p_m132_f<0x96, "vfmaddsub132", X86Fmaddsub>;
4086 defm VFMSUBADD132 : avx512_fma3p_m132_f<0x97, "vfmsubadd132", X86Fmsubadd>;
4087 defm VFNMADD132 : avx512_fma3p_m132_f<0x9C, "vfnmadd132", X86Fnmadd>;
4088 defm VFNMSUB132 : avx512_fma3p_m132_f<0x9E, "vfnmsub132", X86Fnmsub>;
// Scalar FMA (213 form) on FR32X/FR64X. The register form is commutable in
// its multiplicands; the pattern places $src2 first to match the 213
// operand roles. The memory form folds the addend/multiplier from memory.
4091 let Constraints = "$src1 = $dst" in {
4092 multiclass avx512_fma3s_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4093 RegisterClass RC, ValueType OpVT,
4094 X86MemOperand x86memop, Operand memop,
4096 let isCommutable = 1 in
4097 def r : AVX512FMA3<opc, MRMSrcReg, (outs RC:$dst),
4098 (ins RC:$src1, RC:$src2, RC:$src3),
4099 !strconcat(OpcodeStr,
4100 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4102 (OpVT (OpNode RC:$src2, RC:$src1, RC:$src3)))]>;
4104 def m : AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
4105 (ins RC:$src1, RC:$src2, f128mem:$src3),
4106 !strconcat(OpcodeStr,
4107 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4109 (OpVT (OpNode RC:$src2, RC:$src1,
4110 (mem_frag addr:$src3))))]>;
4112 } // Constraints = "$src1 = $dst"
// Scalar FMA instantiations: ss uses CD8VT1 with 32-bit elements, sd with
// 64-bit elements (VEX_W).
4114 defm VFMADDSSZ : avx512_fma3s_rm<0xA9, "vfmadd213ss", X86Fmadd, FR32X,
4115 f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
4116 defm VFMADDSDZ : avx512_fma3s_rm<0xA9, "vfmadd213sd", X86Fmadd, FR64X,
4117 f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
4118 defm VFMSUBSSZ : avx512_fma3s_rm<0xAB, "vfmsub213ss", X86Fmsub, FR32X,
4119 f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
4120 defm VFMSUBSDZ : avx512_fma3s_rm<0xAB, "vfmsub213sd", X86Fmsub, FR64X,
4121 f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
4122 defm VFNMADDSSZ : avx512_fma3s_rm<0xAD, "vfnmadd213ss", X86Fnmadd, FR32X,
4123 f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
4124 defm VFNMADDSDZ : avx512_fma3s_rm<0xAD, "vfnmadd213sd", X86Fnmadd, FR64X,
4125 f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
4126 defm VFNMSUBSSZ : avx512_fma3s_rm<0xAF, "vfnmsub213ss", X86Fnmsub, FR32X,
4127 f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
4128 defm VFNMSUBSDZ : avx512_fma3s_rm<0xAF, "vfnmsub213sd", X86Fnmsub, FR64X,
4129 f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
4131 //===----------------------------------------------------------------------===//
4132 // AVX-512 Scalar convert from sign integer to float/double
4133 //===----------------------------------------------------------------------===//
// GPR/memory -> scalar FP convert. No patterns here (hasSideEffects = 0,
// empty dag); selection happens through the Pat<> entries below, which pass
// IMPLICIT_DEF for the merged upper bits of the destination.
4135 multiclass avx512_vcvtsi<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
4136 X86MemOperand x86memop, string asm> {
4137 let hasSideEffects = 0 in {
4138 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
4139 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
4142 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
4143 (ins DstRC:$src1, x86memop:$src),
4144 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
4146 } // hasSideEffects = 0
// Signed conversions (opcode 0x2A) for 32/64-bit GPR sources.
4149 let Predicates = [HasAVX512] in {
4150 defm VCVTSI2SSZ : avx512_vcvtsi<0x2A, GR32, FR32X, i32mem, "cvtsi2ss{l}">,
4151 XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
4152 defm VCVTSI642SSZ : avx512_vcvtsi<0x2A, GR64, FR32X, i64mem, "cvtsi2ss{q}">,
4153 XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
4154 defm VCVTSI2SDZ : avx512_vcvtsi<0x2A, GR32, FR64X, i32mem, "cvtsi2sd{l}">,
4155 XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
4156 defm VCVTSI642SDZ : avx512_vcvtsi<0x2A, GR64, FR64X, i64mem, "cvtsi2sd{q}">,
4157 XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
// sint_to_fp selection: memory forms...
4159 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
4160 (VCVTSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
4161 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
4162 (VCVTSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
4163 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
4164 (VCVTSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
4165 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
4166 (VCVTSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
// ...and register forms.
4168 def : Pat<(f32 (sint_to_fp GR32:$src)),
4169 (VCVTSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
4170 def : Pat<(f32 (sint_to_fp GR64:$src)),
4171 (VCVTSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
4172 def : Pat<(f64 (sint_to_fp GR32:$src)),
4173 (VCVTSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
4174 def : Pat<(f64 (sint_to_fp GR64:$src)),
4175 (VCVTSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
// Unsigned conversions (AVX-512-only opcode 0x7B) and their patterns.
4177 defm VCVTUSI2SSZ : avx512_vcvtsi<0x7B, GR32, FR32X, i32mem, "cvtusi2ss{l}">,
4178 XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
4179 defm VCVTUSI642SSZ : avx512_vcvtsi<0x7B, GR64, FR32X, i64mem, "cvtusi2ss{q}">,
4180 XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
4181 defm VCVTUSI2SDZ : avx512_vcvtsi<0x7B, GR32, FR64X, i32mem, "cvtusi2sd{l}">,
4182 XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
4183 defm VCVTUSI642SDZ : avx512_vcvtsi<0x7B, GR64, FR64X, i64mem, "cvtusi2sd{q}">,
4184 XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
4186 def : Pat<(f32 (uint_to_fp (loadi32 addr:$src))),
4187 (VCVTUSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
4188 def : Pat<(f32 (uint_to_fp (loadi64 addr:$src))),
4189 (VCVTUSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
4190 def : Pat<(f64 (uint_to_fp (loadi32 addr:$src))),
4191 (VCVTUSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
4192 def : Pat<(f64 (uint_to_fp (loadi64 addr:$src))),
4193 (VCVTUSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
4195 def : Pat<(f32 (uint_to_fp GR32:$src)),
4196 (VCVTUSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
4197 def : Pat<(f32 (uint_to_fp GR64:$src)),
4198 (VCVTUSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
4199 def : Pat<(f64 (uint_to_fp GR32:$src)),
4200 (VCVTUSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
4201 def : Pat<(f64 (uint_to_fp GR64:$src)),
4202 (VCVTUSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
4205 //===----------------------------------------------------------------------===//
4206 // AVX-512 Scalar convert from float/double to integer
4207 //===----------------------------------------------------------------------===//
// Intrinsic-based scalar FP -> GPR convert. rr matches the intrinsic
// directly; rm has no pattern (folding handled elsewhere).
4208 multiclass avx512_cvt_s_int<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
4209 Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
4211 let hasSideEffects = 0 in {
4212 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
4213 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
4214 [(set DstRC:$dst, (Int SrcRC:$src))]>, EVEX, VEX_LIG,
4215 Requires<[HasAVX512]>;
4217 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
4218 !strconcat(asm,"\t{$src, $dst|$dst, $src}"), []>, EVEX, VEX_LIG,
4219 Requires<[HasAVX512]>;
4220 } // hasSideEffects = 0
4222 let Predicates = [HasAVX512] in {
4223 // Convert float/double to signed/unsigned int 32/64
// Signed forms use opcode 0x2D; unsigned (AVX-512-only) use 0x79.
4224 defm VCVTSS2SIZ: avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse_cvtss2si,
4225 ssmem, sse_load_f32, "cvtss2si">,
4226 XS, EVEX_CD8<32, CD8VT1>;
4227 defm VCVTSS2SI64Z: avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse_cvtss2si64,
4228 ssmem, sse_load_f32, "cvtss2si">,
4229 XS, VEX_W, EVEX_CD8<32, CD8VT1>;
4230 defm VCVTSS2USIZ: avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtss2usi,
4231 ssmem, sse_load_f32, "cvtss2usi">,
4232 XS, EVEX_CD8<32, CD8VT1>;
4233 defm VCVTSS2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
4234 int_x86_avx512_cvtss2usi64, ssmem,
4235 sse_load_f32, "cvtss2usi">, XS, VEX_W,
4236 EVEX_CD8<32, CD8VT1>;
4237 defm VCVTSD2SIZ: avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse2_cvtsd2si,
4238 sdmem, sse_load_f64, "cvtsd2si">,
4239 XD, EVEX_CD8<64, CD8VT1>;
4240 defm VCVTSD2SI64Z: avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse2_cvtsd2si64,
4241 sdmem, sse_load_f64, "cvtsd2si">,
4242 XD, VEX_W, EVEX_CD8<64, CD8VT1>;
4243 defm VCVTSD2USIZ: avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtsd2usi,
4244 sdmem, sse_load_f64, "cvtsd2usi">,
4245 XD, EVEX_CD8<64, CD8VT1>;
4246 defm VCVTSD2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
4247 int_x86_avx512_cvtsd2usi64, sdmem,
4248 sse_load_f64, "cvtsd2usi">, XD, VEX_W,
4249 EVEX_CD8<64, CD8VT1>;
// Intrinsic-matching int -> FP forms, kept codegen-only so they do not
// clash with the assembler-visible definitions above.
4251 let isCodeGenOnly = 1 in {
4252 defm Int_VCVTSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
4253 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
4254 SSE_CVT_Scalar, 0>, XS, EVEX_4V;
4255 defm Int_VCVTSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
4256 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
4257 SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
4258 defm Int_VCVTSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
4259 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
4260 SSE_CVT_Scalar, 0>, XD, EVEX_4V;
4261 defm Int_VCVTSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
4262 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
4263 SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
4265 defm Int_VCVTUSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
4266 int_x86_avx512_cvtusi2ss, i32mem, loadi32, "cvtusi2ss{l}",
4267 SSE_CVT_Scalar, 0>, XS, EVEX_4V;
4268 defm Int_VCVTUSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
4269 int_x86_avx512_cvtusi642ss, i64mem, loadi64, "cvtusi2ss{q}",
4270 SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
4271 defm Int_VCVTUSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
4272 int_x86_avx512_cvtusi2sd, i32mem, loadi32, "cvtusi2sd{l}",
4273 SSE_CVT_Scalar, 0>, XD, EVEX_4V;
4274 defm Int_VCVTUSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
4275 int_x86_avx512_cvtusi642sd, i64mem, loadi64, "cvtusi2sd{q}",
4276 SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
4277 } // isCodeGenOnly = 1
4279 // Convert float/double to signed/unsigned int 32/64 with truncation
// Truncating intrinsic forms: signed truncation is opcode 0x2C, unsigned
// (AVX-512-only) is 0x78. Codegen-only to avoid duplicate assembler entries.
4280 let isCodeGenOnly = 1 in {
4281 defm Int_VCVTTSS2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse_cvttss2si,
4282 ssmem, sse_load_f32, "cvttss2si">,
4283 XS, EVEX_CD8<32, CD8VT1>;
4284 defm Int_VCVTTSS2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
4285 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
4286 "cvttss2si">, XS, VEX_W,
4287 EVEX_CD8<32, CD8VT1>;
4288 defm Int_VCVTTSD2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse2_cvttsd2si,
4289 sdmem, sse_load_f64, "cvttsd2si">, XD,
4290 EVEX_CD8<64, CD8VT1>;
4291 defm Int_VCVTTSD2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
4292 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
4293 "cvttsd2si">, XD, VEX_W,
4294 EVEX_CD8<64, CD8VT1>;
4295 defm Int_VCVTTSS2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
4296 int_x86_avx512_cvttss2usi, ssmem, sse_load_f32,
4297 "cvttss2usi">, XS, EVEX_CD8<32, CD8VT1>;
4298 defm Int_VCVTTSS2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
4299 int_x86_avx512_cvttss2usi64, ssmem,
4300 sse_load_f32, "cvttss2usi">, XS, VEX_W,
4301 EVEX_CD8<32, CD8VT1>;
4302 defm Int_VCVTTSD2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
4303 int_x86_avx512_cvttsd2usi,
4304 sdmem, sse_load_f64, "cvttsd2usi">, XD,
4305 EVEX_CD8<64, CD8VT1>;
4306 defm Int_VCVTTSD2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
4307 int_x86_avx512_cvttsd2usi64, sdmem,
4308 sse_load_f64, "cvttsd2usi">, XD, VEX_W,
4309 EVEX_CD8<64, CD8VT1>;
4310 } // isCodeGenOnly = 1
// SDNode-based truncating FP -> int converts on scalar FP register classes
// (FR32X/FR64X). These match fp_to_sint / fp_to_uint directly, unlike the
// intrinsic-based versions above which operate on VR128X.
4312 multiclass avx512_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
4313 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
4315 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
4316 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
4317 [(set DstRC:$dst, (OpNode SrcRC:$src))]>, EVEX;
4318 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
4319 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
4320 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>, EVEX;
// Truncating convert instantiations: ss/sd sources to GR32/GR64, signed
// (0x2C) and unsigned (0x78).
4323 defm VCVTTSS2SIZ : avx512_cvt_s<0x2C, FR32X, GR32, fp_to_sint, f32mem,
4324 loadf32, "cvttss2si">, XS,
4325 EVEX_CD8<32, CD8VT1>;
4326 defm VCVTTSS2USIZ : avx512_cvt_s<0x78, FR32X, GR32, fp_to_uint, f32mem,
4327 loadf32, "cvttss2usi">, XS,
4328 EVEX_CD8<32, CD8VT1>;
4329 defm VCVTTSS2SI64Z : avx512_cvt_s<0x2C, FR32X, GR64, fp_to_sint, f32mem,
4330 loadf32, "cvttss2si">, XS, VEX_W,
4331 EVEX_CD8<32, CD8VT1>;
4332 defm VCVTTSS2USI64Z : avx512_cvt_s<0x78, FR32X, GR64, fp_to_uint, f32mem,
4333 loadf32, "cvttss2usi">, XS, VEX_W,
4334 EVEX_CD8<32, CD8VT1>;
4335 defm VCVTTSD2SIZ : avx512_cvt_s<0x2C, FR64X, GR32, fp_to_sint, f64mem,
4336 loadf64, "cvttsd2si">, XD,
4337 EVEX_CD8<64, CD8VT1>;
4338 defm VCVTTSD2USIZ : avx512_cvt_s<0x78, FR64X, GR32, fp_to_uint, f64mem,
4339 loadf64, "cvttsd2usi">, XD,
4340 EVEX_CD8<64, CD8VT1>;
4341 defm VCVTTSD2SI64Z : avx512_cvt_s<0x2C, FR64X, GR64, fp_to_sint, f64mem,
4342 loadf64, "cvttsd2si">, XD, VEX_W,
4343 EVEX_CD8<64, CD8VT1>;
4344 defm VCVTTSD2USI64Z : avx512_cvt_s<0x78, FR64X, GR64, fp_to_uint, f64mem,
4345 loadf64, "cvttsd2usi">, XD, VEX_W,
4346 EVEX_CD8<64, CD8VT1>;
4348 //===----------------------------------------------------------------------===//
4349 // AVX-512 Convert form float to double and back
4350 //===----------------------------------------------------------------------===//
// Scalar ss<->sd converts. The instructions carry no patterns
// (hasSideEffects = 0); selection is done via the Pat<> entries below.
4351 let hasSideEffects = 0 in {
4352 def VCVTSS2SDZrr : AVX512XSI<0x5A, MRMSrcReg, (outs FR64X:$dst),
4353 (ins FR32X:$src1, FR32X:$src2),
4354 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4355 []>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2F]>;
4357 def VCVTSS2SDZrm : AVX512XSI<0x5A, MRMSrcMem, (outs FR64X:$dst),
4358 (ins FR32X:$src1, f32mem:$src2),
4359 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4360 []>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>,
4361 EVEX_CD8<32, CD8VT1>;
4363 // Convert scalar double to scalar single
4364 def VCVTSD2SSZrr : AVX512XDI<0x5A, MRMSrcReg, (outs FR32X:$dst),
4365 (ins FR64X:$src1, FR64X:$src2),
4366 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4367 []>, EVEX_4V, VEX_LIG, VEX_W, Sched<[WriteCvtF2F]>;
4369 def VCVTSD2SSZrm : AVX512XDI<0x5A, MRMSrcMem, (outs FR32X:$dst),
4370 (ins FR64X:$src1, f64mem:$src2),
4371 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4372 []>, EVEX_4V, VEX_LIG, VEX_W,
4373 Sched<[WriteCvtF2FLd, ReadAfterLd]>, EVEX_CD8<64, CD8VT1>;
// fextend: the register operand is duplicated into both sources.
4376 def : Pat<(f64 (fextend FR32X:$src)), (VCVTSS2SDZrr FR32X:$src, FR32X:$src)>,
4377 Requires<[HasAVX512]>;
4378 def : Pat<(fextend (loadf32 addr:$src)),
4379 (VCVTSS2SDZrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX512]>;
// extloadf32: fold the load when optimizing for size; otherwise load with
// VMOVSS first and convert register-to-register (avoids a partial update).
4381 def : Pat<(extloadf32 addr:$src),
4382 (VCVTSS2SDZrm (f32 (IMPLICIT_DEF)), addr:$src)>,
4383 Requires<[HasAVX512, OptForSize]>;
4385 def : Pat<(extloadf32 addr:$src),
4386 (VCVTSS2SDZrr (f32 (IMPLICIT_DEF)), (VMOVSSZrm addr:$src))>,
4387 Requires<[HasAVX512, OptForSpeed]>;
4389 def : Pat<(f32 (fround FR64X:$src)), (VCVTSD2SSZrr FR64X:$src, FR64X:$src)>,
4390 Requires<[HasAVX512]>;
// Packed convert with an extra rrb form that takes a static rounding-control
// operand (EVEX_B + EVEX_RC); the rrb form has no pattern and is matched by
// intrinsic Pat<> entries elsewhere.
4392 multiclass avx512_vcvt_fp_with_rc<bits<8> opc, string asm, RegisterClass SrcRC,
4393 RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
4394 X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
4396 let hasSideEffects = 0 in {
4397 def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
4398 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
4400 (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
4401 def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
4402 !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"),
4403 [], d>, EVEX, EVEX_B, EVEX_RC;
4405 def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
4406 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
4408 (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
4409 } // hasSideEffects = 0
// Same as above without the rounding-control form (for converts that do not
// support embedded rounding).
4412 multiclass avx512_vcvt_fp<bits<8> opc, string asm, RegisterClass SrcRC,
4413 RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
4414 X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
4416 let hasSideEffects = 0 in {
4417 def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
4418 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
4420 (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
4422 def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
4423 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
4425 (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
4426 } // hasSideEffects = 0
// 512-bit pd->ps (narrowing, supports rounding control) and ps->pd
// (widening, half-vector source so CD8VH).
4429 defm VCVTPD2PSZ : avx512_vcvt_fp_with_rc<0x5A, "vcvtpd2ps", VR512, VR256X, fround,
4430 loadv8f64, f512mem, v8f32, v8f64,
4431 SSEPackedSingle>, EVEX_V512, VEX_W, PD,
4432 EVEX_CD8<64, CD8VF>;
4434 defm VCVTPS2PDZ : avx512_vcvt_fp<0x5A, "vcvtps2pd", VR256X, VR512, fextend,
4435 loadv4f64, f256mem, v8f64, v8f32,
4436 SSEPackedDouble>, EVEX_V512, PS,
4437 EVEX_CD8<32, CD8VH>;
4438 def : Pat<(v8f64 (extloadv8f32 addr:$src)),
4439 (VCVTPS2PDZrm addr:$src)>;
// Map the unmasked (all-ones mask, current rounding) intrinsic onto rr, and
// the explicit-rounding variant onto rrb.
4441 def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
4442 (bc_v8f32(v8i32 immAllZerosV)), (i8 -1), (i32 FROUND_CURRENT))),
4443 (VCVTPD2PSZrr VR512:$src)>;
4445 def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
4446 (bc_v8f32(v8i32 immAllZerosV)), (i8 -1), imm:$rc)),
4447 (VCVTPD2PSZrrb VR512:$src, imm:$rc)>;
4449 //===----------------------------------------------------------------------===//
4450 // AVX-512 Vector convert from sign integer to float/double
4451 //===----------------------------------------------------------------------===//
// Packed int<->fp converts at 512 bits. Unsigned variants (udq, opcodes
// 0x78/0x79/0x7A) are AVX-512-only.
4453 defm VCVTDQ2PSZ : avx512_vcvt_fp_with_rc<0x5B, "vcvtdq2ps", VR512, VR512, sint_to_fp,
4454 loadv8i64, i512mem, v16f32, v16i32,
4455 SSEPackedSingle>, EVEX_V512, PS,
4456 EVEX_CD8<32, CD8VF>;
4458 defm VCVTDQ2PDZ : avx512_vcvt_fp<0xE6, "vcvtdq2pd", VR256X, VR512, sint_to_fp,
4459 loadv4i64, i256mem, v8f64, v8i32,
4460 SSEPackedDouble>, EVEX_V512, XS,
4461 EVEX_CD8<32, CD8VH>;
4463 defm VCVTTPS2DQZ : avx512_vcvt_fp<0x5B, "vcvttps2dq", VR512, VR512, fp_to_sint,
4464 loadv16f32, f512mem, v16i32, v16f32,
4465 SSEPackedSingle>, EVEX_V512, XS,
4466 EVEX_CD8<32, CD8VF>;
4468 defm VCVTTPD2DQZ : avx512_vcvt_fp<0xE6, "vcvttpd2dq", VR512, VR256X, fp_to_sint,
4469 loadv8f64, f512mem, v8i32, v8f64,
4470 SSEPackedDouble>, EVEX_V512, PD, VEX_W,
4471 EVEX_CD8<64, CD8VF>;
4473 defm VCVTTPS2UDQZ : avx512_vcvt_fp<0x78, "vcvttps2udq", VR512, VR512, fp_to_uint,
4474 loadv16f32, f512mem, v16i32, v16f32,
4475 SSEPackedSingle>, EVEX_V512, PS,
4476 EVEX_CD8<32, CD8VF>;
4478 // cvttps2udq (src, 0, mask-all-ones, sae-current)
4479 def : Pat<(v16i32 (int_x86_avx512_mask_cvttps2udq_512 (v16f32 VR512:$src),
4480 (v16i32 immAllZerosV), (i16 -1), FROUND_CURRENT)),
4481 (VCVTTPS2UDQZrr VR512:$src)>;
4483 defm VCVTTPD2UDQZ : avx512_vcvt_fp<0x78, "vcvttpd2udq", VR512, VR256X, fp_to_uint,
4484 loadv8f64, f512mem, v8i32, v8f64,
4485 SSEPackedDouble>, EVEX_V512, PS, VEX_W,
4486 EVEX_CD8<64, CD8VF>;
4488 // cvttpd2udq (src, 0, mask-all-ones, sae-current)
4489 def : Pat<(v8i32 (int_x86_avx512_mask_cvttpd2udq_512 (v8f64 VR512:$src),
4490 (v8i32 immAllZerosV), (i8 -1), FROUND_CURRENT)),
4491 (VCVTTPD2UDQZrr VR512:$src)>;
4493 defm VCVTUDQ2PDZ : avx512_vcvt_fp<0x7A, "vcvtudq2pd", VR256X, VR512, uint_to_fp,
4494 loadv4i64, f256mem, v8f64, v8i32,
4495 SSEPackedDouble>, EVEX_V512, XS,
4496 EVEX_CD8<32, CD8VH>;
4498 defm VCVTUDQ2PSZ : avx512_vcvt_fp_with_rc<0x7A, "vcvtudq2ps", VR512, VR512, uint_to_fp,
4499 loadv16i32, f512mem, v16f32, v16i32,
4500 SSEPackedSingle>, EVEX_V512, XD,
4501 EVEX_CD8<32, CD8VF>;
// Narrower unsigned converts are emulated by widening into a 512-bit
// register (SUBREG_TO_REG), converting, and extracting the low subvector.
4503 def : Pat<(v8i32 (fp_to_uint (v8f32 VR256X:$src1))),
4504 (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
4505 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
4507 def : Pat<(v4i32 (fp_to_uint (v4f32 VR128X:$src1))),
4508 (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
4509 (v16f32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;
4511 def : Pat<(v8f32 (uint_to_fp (v8i32 VR256X:$src1))),
4512 (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
4513 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
4515 def : Pat<(v4f32 (uint_to_fp (v4i32 VR128X:$src1))),
4516 (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
4517 (v16i32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;
4519 def : Pat<(v4f64 (uint_to_fp (v4i32 VR128X:$src1))),
4520 (EXTRACT_SUBREG (v8f64 (VCVTUDQ2PDZrr
4521 (v8i32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_ymm)>;
// Unmasked intrinsic forms: explicit-rounding variants map to rrb.
4523 def : Pat<(v16f32 (int_x86_avx512_mask_cvtdq2ps_512 (v16i32 VR512:$src),
4524 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), imm:$rc)),
4525 (VCVTDQ2PSZrrb VR512:$src, imm:$rc)>;
4526 def : Pat<(v8f64 (int_x86_avx512_mask_cvtdq2pd_512 (v8i32 VR256X:$src),
4527 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
4528 (VCVTDQ2PDZrr VR256X:$src)>;
4529 def : Pat<(v16f32 (int_x86_avx512_mask_cvtudq2ps_512 (v16i32 VR512:$src),
4530 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), imm:$rc)),
4531 (VCVTUDQ2PSZrrb VR512:$src, imm:$rc)>;
4532 def : Pat<(v8f64 (int_x86_avx512_mask_cvtudq2pd_512 (v8i32 VR256X:$src),
4533 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
4534 (VCVTUDQ2PDZrr VR256X:$src)>;
// Packed FP -> int converts matched only via intrinsics: none of the forms
// here carry patterns (rr/rm patterns live in the Pat<> entries below), and
// rrb adds the static rounding-control operand.
4536 multiclass avx512_vcvt_fp2int<bits<8> opc, string asm, RegisterClass SrcRC,
4537 RegisterClass DstRC, PatFrag mem_frag,
4538 X86MemOperand x86memop, Domain d> {
4539 let hasSideEffects = 0 in {
4540 def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
4541 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
4543 def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
4544 !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"),
4545 [], d>, EVEX, EVEX_B, EVEX_RC;
4547 def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
4548 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
4550 } // hasSideEffects = 0
// Signed (0x5B / 0xE6) and unsigned (0x79) rounding converts, plus the
// intrinsic patterns selecting the explicit-rounding rrb forms.
4553 defm VCVTPS2DQZ : avx512_vcvt_fp2int<0x5B, "vcvtps2dq", VR512, VR512,
4554 loadv16f32, f512mem, SSEPackedSingle>, PD,
4555 EVEX_V512, EVEX_CD8<32, CD8VF>;
4556 defm VCVTPD2DQZ : avx512_vcvt_fp2int<0xE6, "vcvtpd2dq", VR512, VR256X,
4557 loadv8f64, f512mem, SSEPackedDouble>, XD, VEX_W,
4558 EVEX_V512, EVEX_CD8<64, CD8VF>;
4560 def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2dq_512 (v16f32 VR512:$src),
4561 (v16i32 immAllZerosV), (i16 -1), imm:$rc)),
4562 (VCVTPS2DQZrrb VR512:$src, imm:$rc)>;
4564 def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2dq_512 (v8f64 VR512:$src),
4565 (v8i32 immAllZerosV), (i8 -1), imm:$rc)),
4566 (VCVTPD2DQZrrb VR512:$src, imm:$rc)>;
4568 defm VCVTPS2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtps2udq", VR512, VR512,
4569 loadv16f32, f512mem, SSEPackedSingle>,
4570 PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
4571 defm VCVTPD2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtpd2udq", VR512, VR256X,
4572 loadv8f64, f512mem, SSEPackedDouble>, VEX_W,
4573 PS, EVEX_V512, EVEX_CD8<64, CD8VF>;
4575 def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2udq_512 (v16f32 VR512:$src),
4576 (v16i32 immAllZerosV), (i16 -1), imm:$rc)),
4577 (VCVTPS2UDQZrrb VR512:$src, imm:$rc)>;
4579 def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2udq_512 (v8f64 VR512:$src),
4580 (v8i32 immAllZerosV), (i8 -1), imm:$rc)),
4581 (VCVTPD2UDQZrrb VR512:$src, imm:$rc)>;
// Load-folding patterns for the pd<->ps converts defined above.
4583 let Predicates = [HasAVX512] in {
4584 def : Pat<(v8f32 (fround (loadv8f64 addr:$src))),
4585 (VCVTPD2PSZrm addr:$src)>;
4586 def : Pat<(v8f64 (extloadv8f32 addr:$src)),
4587 (VCVTPS2PDZrm addr:$src)>;
4590 //===----------------------------------------------------------------------===//
4591 // Half precision conversion instructions
4592 //===----------------------------------------------------------------------===//
// f16 -> f32 widening convert; memory form is load-only with no pattern.
4593 multiclass avx512_cvtph2ps<RegisterClass destRC, RegisterClass srcRC,
4594 X86MemOperand x86memop> {
4595 def rr : AVX5128I<0x13, MRMSrcReg, (outs destRC:$dst), (ins srcRC:$src),
4596 "vcvtph2ps\t{$src, $dst|$dst, $src}",
4598 let hasSideEffects = 0, mayLoad = 1 in
4599 def rm : AVX5128I<0x13, MRMSrcMem, (outs destRC:$dst), (ins x86memop:$src),
4600 "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, EVEX;
// f32 -> f16 narrowing convert; $src2 is the rounding-mode immediate, and
// the memory form stores to memory (MRMDestMem).
4603 multiclass avx512_cvtps2ph<RegisterClass destRC, RegisterClass srcRC,
4604 X86MemOperand x86memop> {
4605 def rr : AVX512AIi8<0x1D, MRMDestReg, (outs destRC:$dst),
4606 (ins srcRC:$src1, i32u8imm:$src2),
4607 "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4609 let hasSideEffects = 0, mayStore = 1 in
4610 def mr : AVX512AIi8<0x1D, MRMDestMem, (outs),
4611 (ins x86memop:$dst, srcRC:$src1, i32u8imm:$src2),
4612 "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, EVEX;
// 512-bit instantiations: 16 x f16 lives in a YMM-sized operand (CD8VH).
4615 defm VCVTPH2PSZ : avx512_cvtph2ps<VR512, VR256X, f256mem>, EVEX_V512,
4616 EVEX_CD8<32, CD8VH>;
4617 defm VCVTPS2PHZ : avx512_cvtps2ph<VR256X, VR512, f256mem>, EVEX_V512,
4618 EVEX_CD8<32, CD8VH>;
// Unmasked intrinsic forms map directly to the rr definitions.
4620 def : Pat<(v16i16 (int_x86_avx512_mask_vcvtps2ph_512 (v16f32 VR512:$src),
4621 imm:$rc, (bc_v16i16(v8i32 immAllZerosV)), (i16 -1))),
4622 (VCVTPS2PHZrr VR512:$src, imm:$rc)>;
4624 def : Pat<(v16f32 (int_x86_avx512_mask_vcvtph2ps_512 (v16i16 VR256X:$src),
4625 (bc_v16f32(v16i32 immAllZerosV)), (i16 -1), (i32 FROUND_CURRENT))),
4626 (VCVTPH2PSZrr VR256X:$src)>;
// Ordered/unordered FP compares that set EFLAGS (UCOMIS/COMIS, EVEX forms).
// Scalar register-class versions match X86cmp; the VR128X versions are
// pattern-less (Pattern = []) or intrinsic-only (isCodeGenOnly).
4628 let Defs = [EFLAGS], Predicates = [HasAVX512] in {
4629 defm VUCOMISSZ : sse12_ord_cmp<0x2E, FR32X, X86cmp, f32, f32mem, loadf32,
4630 "ucomiss">, PS, EVEX, VEX_LIG,
4631 EVEX_CD8<32, CD8VT1>;
4632 defm VUCOMISDZ : sse12_ord_cmp<0x2E, FR64X, X86cmp, f64, f64mem, loadf64,
4633 "ucomisd">, PD, EVEX,
4634 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
4635 let Pattern = []<dag> in {
4636 defm VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, undef, v4f32, f128mem, load,
4637 "comiss">, PS, EVEX, VEX_LIG,
4638 EVEX_CD8<32, CD8VT1>;
4639 defm VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, undef, v2f64, f128mem, load,
4640 "comisd">, PD, EVEX,
4641 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
4643 let isCodeGenOnly = 1 in {
4644 defm Int_VUCOMISSZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v4f32, f128mem,
4645 load, "ucomiss">, PS, EVEX, VEX_LIG,
4646 EVEX_CD8<32, CD8VT1>;
4647 defm Int_VUCOMISDZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v2f64, f128mem,
4648 load, "ucomisd">, PD, EVEX,
4649 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
4651 defm Int_VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v4f32, f128mem,
4652 load, "comiss">, PS, EVEX, VEX_LIG,
4653 EVEX_CD8<32, CD8VT1>;
4654 defm Int_VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v2f64, f128mem,
4655 load, "comisd">, PD, EVEX,
4656 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
4660 /// avx512_fp14_s rcp14ss, rcp14sd, rsqrt14ss, rsqrt14sd
// Scalar register-register and register-memory forms with no selection
// patterns (hasSideEffects = 0, patterns added separately below).
4661 multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
4662 X86MemOperand x86memop> {
4663 let hasSideEffects = 0 in {
4664 def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
4665 (ins RC:$src1, RC:$src2),
4666 !strconcat(OpcodeStr,
4667 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
4668 let mayLoad = 1 in {
4669 def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
4670 (ins RC:$src1, x86memop:$src2),
4671 !strconcat(OpcodeStr,
4672 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
// Scalar RCP14/RSQRT14 instantiations plus patterns mapping the unmasked
// intrinsic forms (passthru = zero, mask = all-ones) onto the FR32X/FR64X
// instructions via COPY_TO_REGCLASS bitcasts in and out of VR128X.
4677 defm VRCP14SS : avx512_fp14_s<0x4D, "vrcp14ss", FR32X, f32mem>,
4678 EVEX_CD8<32, CD8VT1>;
4679 defm VRCP14SD : avx512_fp14_s<0x4D, "vrcp14sd", FR64X, f64mem>,
4680 VEX_W, EVEX_CD8<64, CD8VT1>;
4681 defm VRSQRT14SS : avx512_fp14_s<0x4F, "vrsqrt14ss", FR32X, f32mem>,
4682 EVEX_CD8<32, CD8VT1>;
4683 defm VRSQRT14SD : avx512_fp14_s<0x4F, "vrsqrt14sd", FR64X, f64mem>,
4684 VEX_W, EVEX_CD8<64, CD8VT1>;
4686 def : Pat <(v4f32 (int_x86_avx512_rcp14_ss (v4f32 VR128X:$src1),
4687 (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1))),
4688 (COPY_TO_REGCLASS (VRCP14SSrr (COPY_TO_REGCLASS VR128X:$src1, FR32X),
4689 (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
4691 def : Pat <(v2f64 (int_x86_avx512_rcp14_sd (v2f64 VR128X:$src1),
4692 (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1))),
4693 (COPY_TO_REGCLASS (VRCP14SDrr (COPY_TO_REGCLASS VR128X:$src1, FR64X),
4694 (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
4696 def : Pat <(v4f32 (int_x86_avx512_rsqrt14_ss (v4f32 VR128X:$src1),
4697 (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1))),
4698 (COPY_TO_REGCLASS (VRSQRT14SSrr (COPY_TO_REGCLASS VR128X:$src1, FR32X),
4699 (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
4701 def : Pat <(v2f64 (int_x86_avx512_rsqrt14_sd (v2f64 VR128X:$src1),
4702 (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1))),
4703 (COPY_TO_REGCLASS (VRSQRT14SDrr (COPY_TO_REGCLASS VR128X:$src1, FR64X),
4704 (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
4706 /// avx512_fp14_p rcp14ps, rcp14pd, rsqrt14ps, rsqrt14pd
// Packed forms with maskable reg, mem, and broadcast-mem (mb) variants.
4707 multiclass avx512_fp14_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
4708 X86VectorVTInfo _> {
4709 defm r: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
4710 (ins _.RC:$src), OpcodeStr, "$src", "$src",
4711 (_.FloatVT (OpNode _.RC:$src))>, EVEX, T8PD;
4712 let mayLoad = 1 in {
4713 defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4714 (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
4716 (bitconvert (_.LdFrag addr:$src))))>, EVEX, T8PD;
4717 defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4718 (ins _.ScalarMemOp:$src), OpcodeStr,
4719 "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
4721 (X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
// Instantiate the packed approximation at 512 bits unconditionally and at
// 128/256 bits only under AVX512VL.
4726 multiclass avx512_fp14_p_vl_all<bits<8> opc, string OpcodeStr, SDNode OpNode> {
4727 defm PSZ : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ps"), OpNode, v16f32_info>,
4728 EVEX_V512, EVEX_CD8<32, CD8VF>;
4729 defm PDZ : avx512_fp14_p<opc, !strconcat(OpcodeStr, "pd"), OpNode, v8f64_info>,
4730 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
4732 // Define only if AVX512VL feature is present.
4733 let Predicates = [HasVLX] in {
4734 defm PSZ128 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ps"),
4735 OpNode, v4f32x_info>,
4736 EVEX_V128, EVEX_CD8<32, CD8VF>;
4737 defm PSZ256 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ps"),
4738 OpNode, v8f32x_info>,
4739 EVEX_V256, EVEX_CD8<32, CD8VF>;
4740 defm PDZ128 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "pd"),
4741 OpNode, v2f64x_info>,
4742 EVEX_V128, VEX_W, EVEX_CD8<64, CD8VF>;
4743 defm PDZ256 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "pd"),
4744 OpNode, v4f64x_info>,
4745 EVEX_V256, VEX_W, EVEX_CD8<64, CD8VF>;
// Packed RSQRT14/RCP14 instantiations, plus patterns selecting the
// unmasked 512-bit intrinsic forms onto the plain register instructions.
4749 defm VRSQRT14 : avx512_fp14_p_vl_all<0x4E, "vrsqrt14", X86frsqrt>;
4750 defm VRCP14 : avx512_fp14_p_vl_all<0x4C, "vrcp14", X86frcp>;
4752 def : Pat <(v16f32 (int_x86_avx512_rsqrt14_ps_512 (v16f32 VR512:$src),
4753 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
4754 (VRSQRT14PSZr VR512:$src)>;
4755 def : Pat <(v8f64 (int_x86_avx512_rsqrt14_pd_512 (v8f64 VR512:$src),
4756 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
4757 (VRSQRT14PDZr VR512:$src)>;
4759 def : Pat <(v16f32 (int_x86_avx512_rcp14_ps_512 (v16f32 VR512:$src),
4760 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
4761 (VRCP14PSZr VR512:$src)>;
4762 def : Pat <(v8f64 (int_x86_avx512_rcp14_pd_512 (v8f64 VR512:$src),
4763 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
4764 (VRCP14PDZr VR512:$src)>;
4766 /// avx512_fp28_s rcp28ss, rcp28sd, rsqrt28ss, rsqrt28sd
// Scalar ERI forms: plain (current rounding), rb (suppress-all-exceptions
// via EVEX.b / {sae}), and memory.
4767 multiclass avx512_fp28_s<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
4770 defm r : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
4771 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
4772 "$src2, $src1", "$src1, $src2",
4773 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
4774 (i32 FROUND_CURRENT))>;
4776 defm rb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
4777 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
4778 "$src2, $src1", "$src1, $src2",
4779 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
4780 (i32 FROUND_NO_EXC)), "{sae}">, EVEX_B;
4782 defm m : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
4783 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
4784 "$src2, $src1", "$src1, $src2",
4785 (OpNode (_.VT _.RC:$src1),
4786 (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
4787 (i32 FROUND_CURRENT))>;
// Instantiate the scalar ERI template for ss/sd and define the scalar
// RCP28/RSQRT28 instructions (AVX-512 ER feature).
4790 multiclass avx512_eri_s<bits<8> opc, string OpcodeStr, SDNode OpNode> {
4791 defm SS : avx512_fp28_s<opc, OpcodeStr#"ss", f32x_info, OpNode>,
4792 EVEX_CD8<32, CD8VT1>;
4793 defm SD : avx512_fp28_s<opc, OpcodeStr#"sd", f64x_info, OpNode>,
4794 EVEX_CD8<64, CD8VT1>, VEX_W;
4797 let hasSideEffects = 0, Predicates = [HasERI] in {
4798 defm VRCP28 : avx512_eri_s<0xCB, "vrcp28", X86rcp28s>, T8PD, EVEX_4V;
4799 defm VRSQRT28 : avx512_eri_s<0xCD, "vrsqrt28", X86rsqrt28s>, T8PD, EVEX_4V;
4801 /// avx512_fp28_p rcp28ps, rcp28pd, rsqrt28ps, rsqrt28pd
// Packed ERI forms: reg, reg-with-{sae} (rb/EVEX_B), mem, and
// broadcast-mem (mb, also EVEX_B).
4803 multiclass avx512_fp28_p<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
4806 defm r : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
4807 (ins _.RC:$src), OpcodeStr, "$src", "$src",
4808 (OpNode (_.VT _.RC:$src), (i32 FROUND_CURRENT))>;
4810 defm rb : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
4811 (ins _.RC:$src), OpcodeStr,
4813 (OpNode (_.VT _.RC:$src), (i32 FROUND_NO_EXC)),
4816 defm m : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4817 (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
4819 (bitconvert (_.LdFrag addr:$src))),
4820 (i32 FROUND_CURRENT))>;
4822 defm mb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4823 (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
4825 (X86VBroadcast (_.ScalarLdFrag addr:$src))),
4826 (i32 FROUND_CURRENT))>, EVEX_B;
// Instantiate the packed ERI template for ps/pd at 512 bits.
4829 multiclass avx512_eri<bits<8> opc, string OpcodeStr, SDNode OpNode> {
4830 defm PS : avx512_fp28_p<opc, OpcodeStr#"ps", v16f32_info, OpNode>,
4831 EVEX_CD8<32, CD8VF>;
// FIX: the pd variant operates on 64-bit elements (v8f64), so the EVEX
// compressed-disp8 scale must be keyed to 64, not 32 — matching the
// other *pd defms in this file (e.g. the fp14 and sqrt PDZ variants).
// With <32, CD8VF> the disp8*N scaling of memory operands is wrong.
4832 defm PD : avx512_fp28_p<opc, OpcodeStr#"pd", v8f64_info, OpNode>,
4833 VEX_W, EVEX_CD8<64, CD8VF>;
// Packed ERI instruction instantiations (512-bit only; ER has no VL forms).
4836 let Predicates = [HasERI], hasSideEffects = 0 in {
4838 defm VRSQRT28 : avx512_eri<0xCC, "vrsqrt28", X86rsqrt28>, EVEX, EVEX_V512, T8PD;
4839 defm VRCP28 : avx512_eri<0xCA, "vrcp28", X86rcp28>, EVEX, EVEX_V512, T8PD;
4840 defm VEXP2 : avx512_eri<0xC8, "vexp2", X86exp2>, EVEX, EVEX_V512, T8PD;
// Packed square root: maskable reg, mem, and broadcast-mem forms.
4843 multiclass avx512_sqrt_packed<bits<8> opc, string OpcodeStr,
4844 SDNode OpNode, X86VectorVTInfo _>{
4845 defm r: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
4846 (ins _.RC:$src), OpcodeStr, "$src", "$src",
4847 (_.FloatVT (OpNode _.RC:$src))>, EVEX;
4848 let mayLoad = 1 in {
4849 defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4850 (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
4852 (bitconvert (_.LdFrag addr:$src))))>, EVEX;
4854 defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4855 (ins _.ScalarMemOp:$src), OpcodeStr,
4856 "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
4858 (X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
// Scalar square root (ss/sd): FR32X/FR64X forms with empty patterns, plus
// isCodeGenOnly *_Int forms on VR128X carrying the intrinsic patterns.
4863 multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
4864 Intrinsic F32Int, Intrinsic F64Int,
4865 OpndItins itins_s, OpndItins itins_d> {
4866 def SSZr : SI<opc, MRMSrcReg, (outs FR32X:$dst),
4867 (ins FR32X:$src1, FR32X:$src2),
4868 !strconcat(OpcodeStr,
4869 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4870 [], itins_s.rr>, XS, EVEX_4V;
4871 let isCodeGenOnly = 1 in
4872 def SSZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
4873 (ins VR128X:$src1, VR128X:$src2),
4874 !strconcat(OpcodeStr,
4875 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4877 (F32Int VR128X:$src1, VR128X:$src2))],
4878 itins_s.rr>, XS, EVEX_4V;
4879 let mayLoad = 1 in {
4880 def SSZm : SI<opc, MRMSrcMem, (outs FR32X:$dst),
4881 (ins FR32X:$src1, f32mem:$src2),
4882 !strconcat(OpcodeStr,
4883 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4884 [], itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
4885 let isCodeGenOnly = 1 in
4886 def SSZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
4887 (ins VR128X:$src1, ssmem:$src2),
4888 !strconcat(OpcodeStr,
4889 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4891 (F32Int VR128X:$src1, sse_load_f32:$src2))],
4892 itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
// Double-precision (sd) variants mirror the ss forms with XD/VEX_W.
4894 def SDZr : SI<opc, MRMSrcReg, (outs FR64X:$dst),
4895 (ins FR64X:$src1, FR64X:$src2),
4896 !strconcat(OpcodeStr,
4897 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
4899 let isCodeGenOnly = 1 in
4900 def SDZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
4901 (ins VR128X:$src1, VR128X:$src2),
4902 !strconcat(OpcodeStr,
4903 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4905 (F64Int VR128X:$src1, VR128X:$src2))],
4906 itins_s.rr>, XD, EVEX_4V, VEX_W;
4907 let mayLoad = 1 in {
4908 def SDZm : SI<opc, MRMSrcMem, (outs FR64X:$dst),
4909 (ins FR64X:$src1, f64mem:$src2),
4910 !strconcat(OpcodeStr,
4911 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
4912 XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
4913 let isCodeGenOnly = 1 in
4914 def SDZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
4915 (ins VR128X:$src1, sdmem:$src2),
4916 !strconcat(OpcodeStr,
4917 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4919 (F64Int VR128X:$src1, sse_load_f64:$src2))]>,
4920 XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
// Instantiate packed sqrt at 512 bits, and at 128/256 bits under AVX512VL;
// then define VSQRT (packed and scalar).
4924 multiclass avx512_sqrt_packed_all<bits<8> opc, string OpcodeStr,
4926 defm PSZ : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
4928 EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
4929 defm PDZ : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
4931 EVEX_V512, VEX_W, PD, EVEX_CD8<64, CD8VF>;
4932 // Define only if AVX512VL feature is present.
4933 let Predicates = [HasVLX] in {
4934 defm PSZ128 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"),
4935 OpNode, v4f32x_info>,
4936 EVEX_V128, PS, EVEX_CD8<32, CD8VF>;
4937 defm PSZ256 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"),
4938 OpNode, v8f32x_info>,
4939 EVEX_V256, PS, EVEX_CD8<32, CD8VF>;
4940 defm PDZ128 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"),
4941 OpNode, v2f64x_info>,
4942 EVEX_V128, VEX_W, PD, EVEX_CD8<64, CD8VF>;
4943 defm PDZ256 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"),
4944 OpNode, v4f64x_info>,
4945 EVEX_V256, VEX_W, PD, EVEX_CD8<64, CD8VF>;
4949 defm VSQRT : avx512_sqrt_packed_all<0x51, "vsqrt", fsqrt>;
4951 defm VSQRT : avx512_sqrt_scalar<0x51, "sqrt",
4952 int_x86_avx512_sqrt_ss, int_x86_avx512_sqrt_sd,
4953 SSE_SQRTSS, SSE_SQRTSD>;
// Selection patterns: unmasked 512-bit sqrt intrinsics, scalar fsqrt /
// X86frsqrt / X86frcp DAG nodes, and the legacy SSE scalar sqrt intrinsics.
// Memory forms are restricted to OptForSize, matching the SSE patterns.
4955 let Predicates = [HasAVX512] in {
4956 def : Pat<(v16f32 (int_x86_avx512_sqrt_ps_512 (v16f32 VR512:$src1),
4957 (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), FROUND_CURRENT)),
4958 (VSQRTPSZr VR512:$src1)>;
4959 def : Pat<(v8f64 (int_x86_avx512_sqrt_pd_512 (v8f64 VR512:$src1),
4960 (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1), FROUND_CURRENT)),
4961 (VSQRTPDZr VR512:$src1)>;
4963 def : Pat<(f32 (fsqrt FR32X:$src)),
4964 (VSQRTSSZr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
4965 def : Pat<(f32 (fsqrt (load addr:$src))),
4966 (VSQRTSSZm (f32 (IMPLICIT_DEF)), addr:$src)>,
4967 Requires<[OptForSize]>;
4968 def : Pat<(f64 (fsqrt FR64X:$src)),
4969 (VSQRTSDZr (f64 (IMPLICIT_DEF)), FR64X:$src)>;
4970 def : Pat<(f64 (fsqrt (load addr:$src))),
4971 (VSQRTSDZm (f64 (IMPLICIT_DEF)), addr:$src)>,
4972 Requires<[OptForSize]>;
4974 def : Pat<(f32 (X86frsqrt FR32X:$src)),
4975 (VRSQRT14SSrr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
4976 def : Pat<(f32 (X86frsqrt (load addr:$src))),
4977 (VRSQRT14SSrm (f32 (IMPLICIT_DEF)), addr:$src)>,
4978 Requires<[OptForSize]>;
4980 def : Pat<(f32 (X86frcp FR32X:$src)),
4981 (VRCP14SSrr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
4982 def : Pat<(f32 (X86frcp (load addr:$src))),
4983 (VRCP14SSrm (f32 (IMPLICIT_DEF)), addr:$src)>,
4984 Requires<[OptForSize]>;
4986 def : Pat<(int_x86_sse_sqrt_ss VR128X:$src),
4987 (COPY_TO_REGCLASS (VSQRTSSZr (f32 (IMPLICIT_DEF)),
4988 (COPY_TO_REGCLASS VR128X:$src, FR32)),
4990 def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
4991 (VSQRTSSZm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
4993 def : Pat<(int_x86_sse2_sqrt_sd VR128X:$src),
4994 (COPY_TO_REGCLASS (VSQRTSDZr (f64 (IMPLICIT_DEF)),
4995 (COPY_TO_REGCLASS VR128X:$src, FR64)),
4997 def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
4998 (VSQRTSDZm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
// Packed round-to-integer with an immediate rounding-control operand
// ($src2). Instantiated for ps/pd at 512 bits, with patterns mapping the
// merge-into-self, all-ones-mask intrinsic forms onto the plain reg forms.
5002 multiclass avx512_rndscale<bits<8> opc, string OpcodeStr,
5003 X86MemOperand x86memop, RegisterClass RC,
5004 PatFrag mem_frag, Domain d> {
5005 let ExeDomain = d in {
5006 // Intrinsic operation, reg.
5007 // Vector intrinsic operation, reg
5008 def r : AVX512AIi8<opc, MRMSrcReg,
5009 (outs RC:$dst), (ins RC:$src1, i32u8imm:$src2),
5010 !strconcat(OpcodeStr,
5011 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5014 // Vector intrinsic operation, mem
5015 def m : AVX512AIi8<opc, MRMSrcMem,
5016 (outs RC:$dst), (ins x86memop:$src1, i32u8imm:$src2),
5017 !strconcat(OpcodeStr,
5018 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5023 defm VRNDSCALEPSZ : avx512_rndscale<0x08, "vrndscaleps", f512mem, VR512,
5024 loadv16f32, SSEPackedSingle>, EVEX_V512,
5025 EVEX_CD8<32, CD8VF>;
5027 def : Pat<(v16f32 (int_x86_avx512_mask_rndscale_ps_512 (v16f32 VR512:$src1),
5028 imm:$src2, (v16f32 VR512:$src1), (i16 -1),
5030 (VRNDSCALEPSZr VR512:$src1, imm:$src2)>;
5033 defm VRNDSCALEPDZ : avx512_rndscale<0x09, "vrndscalepd", f512mem, VR512,
5034 loadv8f64, SSEPackedDouble>, EVEX_V512,
5035 VEX_W, EVEX_CD8<64, CD8VF>;
5037 def : Pat<(v8f64 (int_x86_avx512_mask_rndscale_pd_512 (v8f64 VR512:$src1),
5038 imm:$src2, (v8f64 VR512:$src1), (i8 -1),
5040 (VRNDSCALEPDZr VR512:$src1, imm:$src2)>;
// Scalar rndscale (ss/sd): reg, reg-with-{sae} (rb/EVEX_B), and mem forms,
// plus patterns that lower the generic scalar rounding nodes to immediates:
// ffloor->0x1, fceil->0x2, ftrunc->0x3, frint->0x4, fnearbyint->0xc.
5043 avx512_rndscale_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
5045 let ExeDomain = _.ExeDomain in {
5046 defm r : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
5047 (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr,
5048 "$src3, $src2, $src1", "$src1, $src2, $src3",
5049 (_.VT (X86RndScale (_.VT _.RC:$src1), (_.VT _.RC:$src2),
5050 (i32 imm:$src3), (i32 FROUND_CURRENT)))>;
5052 defm rb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
5053 (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr,
5054 "$src3, $src2, $src1", "$src1, $src2, $src3",
5055 (_.VT (X86RndScale (_.VT _.RC:$src1), (_.VT _.RC:$src2),
5056 (i32 imm:$src3), (i32 FROUND_NO_EXC))), "{sae}">, EVEX_B;
5059 defm m : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
5060 (ins _.RC:$src1, _.MemOp:$src2, i32u8imm:$src3), OpcodeStr,
5061 "$src3, $src2, $src1", "$src1, $src2, $src3",
5062 (_.VT (X86RndScale (_.VT _.RC:$src1),
5063 (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
5064 (i32 imm:$src3), (i32 FROUND_CURRENT)))>;
5066 let Predicates = [HasAVX512] in {
5067 def : Pat<(ffloor _.FRC:$src), (COPY_TO_REGCLASS
5068 (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
5069 (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x1))), _.FRC)>;
5070 def : Pat<(fceil _.FRC:$src), (COPY_TO_REGCLASS
5071 (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
5072 (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x2))), _.FRC)>;
5073 def : Pat<(ftrunc _.FRC:$src), (COPY_TO_REGCLASS
5074 (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
5075 (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x3))), _.FRC)>;
5076 def : Pat<(frint _.FRC:$src), (COPY_TO_REGCLASS
5077 (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
5078 (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x4))), _.FRC)>;
5079 def : Pat<(fnearbyint _.FRC:$src), (COPY_TO_REGCLASS
5080 (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
5081 (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0xc))), _.FRC)>;
5083 def : Pat<(ffloor (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
5084 (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
5085 addr:$src, (i32 0x1))), _.FRC)>;
5086 def : Pat<(fceil (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
5087 (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
5088 addr:$src, (i32 0x2))), _.FRC)>;
5089 def : Pat<(ftrunc (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
5090 (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
5091 addr:$src, (i32 0x3))), _.FRC)>;
5092 def : Pat<(frint (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
5093 (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
5094 addr:$src, (i32 0x4))), _.FRC)>;
5095 def : Pat<(fnearbyint (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
5096 (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
5097 addr:$src, (i32 0xc))), _.FRC)>;
5101 defm VRNDSCALESS : avx512_rndscale_scalar<0x0A, "vrndscaless", f32x_info>,
5102 AVX512AIi8Base, EVEX_4V, EVEX_CD8<32, CD8VT1>;
5104 defm VRNDSCALESD : avx512_rndscale_scalar<0x0B, "vrndscalesd", f64x_info>, VEX_W,
5105 AVX512AIi8Base, EVEX_4V, EVEX_CD8<64, CD8VT1>;
// Lower packed rounding nodes to VRNDSCALEPS/PD with the same immediate
// encoding as the scalar patterns above (floor=1, ceil=2, trunc=3,
// rint=4, nearbyint=0xC).
5107 let Predicates = [HasAVX512] in {
5108 def : Pat<(v16f32 (ffloor VR512:$src)),
5109 (VRNDSCALEPSZr VR512:$src, (i32 0x1))>;
5110 def : Pat<(v16f32 (fnearbyint VR512:$src)),
5111 (VRNDSCALEPSZr VR512:$src, (i32 0xC))>;
5112 def : Pat<(v16f32 (fceil VR512:$src)),
5113 (VRNDSCALEPSZr VR512:$src, (i32 0x2))>;
5114 def : Pat<(v16f32 (frint VR512:$src)),
5115 (VRNDSCALEPSZr VR512:$src, (i32 0x4))>;
5116 def : Pat<(v16f32 (ftrunc VR512:$src)),
5117 (VRNDSCALEPSZr VR512:$src, (i32 0x3))>;
5119 def : Pat<(v8f64 (ffloor VR512:$src)),
5120 (VRNDSCALEPDZr VR512:$src, (i32 0x1))>;
5121 def : Pat<(v8f64 (fnearbyint VR512:$src)),
5122 (VRNDSCALEPDZr VR512:$src, (i32 0xC))>;
5123 def : Pat<(v8f64 (fceil VR512:$src)),
5124 (VRNDSCALEPDZr VR512:$src, (i32 0x2))>;
5125 def : Pat<(v8f64 (frint VR512:$src)),
5126 (VRNDSCALEPDZr VR512:$src, (i32 0x4))>;
5127 def : Pat<(v8f64 (ftrunc VR512:$src)),
5128 (VRNDSCALEPDZr VR512:$src, (i32 0x3))>;
5130 //-------------------------------------------------
5131 // Integer truncate and extend operations
5132 //-------------------------------------------------
// Down-converting moves (VPMOV*): plain, merge-masked (rrk), zero-masked
// (rrkz), store (mr), and masked-store (mrk) forms, followed by the
// 512-bit instantiations and the X86vtrunc(m) selection patterns.
5134 multiclass avx512_trunc_sat<bits<8> opc, string OpcodeStr,
5135 RegisterClass dstRC, RegisterClass srcRC,
5136 RegisterClass KRC, X86MemOperand x86memop> {
5137 def rr : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
5139 !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
5142 def rrk : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
5143 (ins KRC:$mask, srcRC:$src),
5144 !strconcat(OpcodeStr,
5145 "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
5148 def rrkz : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
5149 (ins KRC:$mask, srcRC:$src),
5150 !strconcat(OpcodeStr,
5151 "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
5154 def mr : AVX512XS8I<opc, MRMDestMem, (outs), (ins x86memop:$dst, srcRC:$src),
5155 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5158 def mrk : AVX512XS8I<opc, MRMDestMem, (outs),
5159 (ins x86memop:$dst, KRC:$mask, srcRC:$src),
5160 !strconcat(OpcodeStr, "\t{$src, $dst {${mask}}|${dst} {${mask}}, $src}"),
// Instantiations: plain truncate plus signed (vpmovs*) and unsigned
// (vpmovus*) saturating variants for each q->b/w/d and d->w/b pair.
5164 defm VPMOVQB : avx512_trunc_sat<0x32, "vpmovqb", VR128X, VR512, VK8WM,
5165 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
5166 defm VPMOVSQB : avx512_trunc_sat<0x22, "vpmovsqb", VR128X, VR512, VK8WM,
5167 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
5168 defm VPMOVUSQB : avx512_trunc_sat<0x12, "vpmovusqb", VR128X, VR512, VK8WM,
5169 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
5170 defm VPMOVQW : avx512_trunc_sat<0x34, "vpmovqw", VR128X, VR512, VK8WM,
5171 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
5172 defm VPMOVSQW : avx512_trunc_sat<0x24, "vpmovsqw", VR128X, VR512, VK8WM,
5173 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
5174 defm VPMOVUSQW : avx512_trunc_sat<0x14, "vpmovusqw", VR128X, VR512, VK8WM,
5175 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
5176 defm VPMOVQD : avx512_trunc_sat<0x35, "vpmovqd", VR256X, VR512, VK8WM,
5177 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
5178 defm VPMOVSQD : avx512_trunc_sat<0x25, "vpmovsqd", VR256X, VR512, VK8WM,
5179 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
5180 defm VPMOVUSQD : avx512_trunc_sat<0x15, "vpmovusqd", VR256X, VR512, VK8WM,
5181 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
5182 defm VPMOVDW : avx512_trunc_sat<0x33, "vpmovdw", VR256X, VR512, VK16WM,
5183 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
5184 defm VPMOVSDW : avx512_trunc_sat<0x23, "vpmovsdw", VR256X, VR512, VK16WM,
5185 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
5186 defm VPMOVUSDW : avx512_trunc_sat<0x13, "vpmovusdw", VR256X, VR512, VK16WM,
5187 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
5188 defm VPMOVDB : avx512_trunc_sat<0x31, "vpmovdb", VR128X, VR512, VK16WM,
5189 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
5190 defm VPMOVSDB : avx512_trunc_sat<0x21, "vpmovsdb", VR128X, VR512, VK16WM,
5191 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
5192 defm VPMOVUSDB : avx512_trunc_sat<0x11, "vpmovusdb", VR128X, VR512, VK16WM,
5193 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
// Select plain and zero-masked vector truncates onto the VPMOV* defs.
5195 def : Pat<(v16i8 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQBrr VR512:$src)>;
5196 def : Pat<(v8i16 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQWrr VR512:$src)>;
5197 def : Pat<(v16i16 (X86vtrunc (v16i32 VR512:$src))), (VPMOVDWrr VR512:$src)>;
5198 def : Pat<(v16i8 (X86vtrunc (v16i32 VR512:$src))), (VPMOVDBrr VR512:$src)>;
5199 def : Pat<(v8i32 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQDrr VR512:$src)>;
5201 def : Pat<(v16i8 (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
5202 (VPMOVDBrrkz VK16WM:$mask, VR512:$src)>;
5203 def : Pat<(v16i16 (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
5204 (VPMOVDWrrkz VK16WM:$mask, VR512:$src)>;
5205 def : Pat<(v8i16 (X86vtruncm VK8WM:$mask, (v8i64 VR512:$src))),
5206 (VPMOVQWrrkz VK8WM:$mask, VR512:$src)>;
5207 def : Pat<(v8i32 (X86vtruncm VK8WM:$mask, (v8i64 VR512:$src))),
5208 (VPMOVQDrrkz VK8WM:$mask, VR512:$src)>;
// Integer extension (zero/sign extend): plain, merge-masked, zero-masked
// register forms plus the corresponding memory forms, then the 512-bit
// VPMOVZX*/VPMOVSX* instantiations.
5211 multiclass avx512_extend<bits<8> opc, string OpcodeStr, RegisterClass KRC,
5212 RegisterClass DstRC, RegisterClass SrcRC, SDNode OpNode,
5213 PatFrag mem_frag, X86MemOperand x86memop,
5214 ValueType OpVT, ValueType InVT> {
5216 def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
5218 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5219 [(set DstRC:$dst, (OpVT (OpNode (InVT SrcRC:$src))))]>, EVEX;
5221 def rrk : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
5222 (ins KRC:$mask, SrcRC:$src),
5223 !strconcat(OpcodeStr, "\t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
5226 def rrkz : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
5227 (ins KRC:$mask, SrcRC:$src),
5228 !strconcat(OpcodeStr, "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
5231 let mayLoad = 1 in {
5232 def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
5233 (ins x86memop:$src),
5234 !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
5236 (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))]>,
5239 def rmk : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
5240 (ins KRC:$mask, x86memop:$src),
5241 !strconcat(OpcodeStr,"\t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
5245 def rmkz : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
5246 (ins KRC:$mask, x86memop:$src),
5247 !strconcat(OpcodeStr,"\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
5253 defm VPMOVZXBDZ: avx512_extend<0x31, "vpmovzxbd", VK16WM, VR512, VR128X, X86vzext,
5254 loadv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
5256 defm VPMOVZXBQZ: avx512_extend<0x32, "vpmovzxbq", VK8WM, VR512, VR128X, X86vzext,
5257 loadv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
5259 defm VPMOVZXWDZ: avx512_extend<0x33, "vpmovzxwd", VK16WM, VR512, VR256X, X86vzext,
5260 loadv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
5261 EVEX_CD8<16, CD8VH>;
5262 defm VPMOVZXWQZ: avx512_extend<0x34, "vpmovzxwq", VK8WM, VR512, VR128X, X86vzext,
5263 loadv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
5264 EVEX_CD8<16, CD8VQ>;
5265 defm VPMOVZXDQZ: avx512_extend<0x35, "vpmovzxdq", VK8WM, VR512, VR256X, X86vzext,
5266 loadv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
5267 EVEX_CD8<32, CD8VH>;
5269 defm VPMOVSXBDZ: avx512_extend<0x21, "vpmovsxbd", VK16WM, VR512, VR128X, X86vsext,
5270 loadv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
5272 defm VPMOVSXBQZ: avx512_extend<0x22, "vpmovsxbq", VK8WM, VR512, VR128X, X86vsext,
5273 loadv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
5275 defm VPMOVSXWDZ: avx512_extend<0x23, "vpmovsxwd", VK16WM, VR512, VR256X, X86vsext,
5276 loadv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
5277 EVEX_CD8<16, CD8VH>;
5278 defm VPMOVSXWQZ: avx512_extend<0x24, "vpmovsxwq", VK8WM, VR512, VR128X, X86vsext,
5279 loadv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
5280 EVEX_CD8<16, CD8VQ>;
5281 defm VPMOVSXDQZ: avx512_extend<0x25, "vpmovsxdq", VK8WM, VR512, VR256X, X86vsext,
5282 loadv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
5283 EVEX_CD8<32, CD8VH>;
5285 //===----------------------------------------------------------------------===//
5286 // GATHER - SCATTER Operations
// Masked gather: the mask is both input and written-back output
// ($mask_wb); the destination is @earlyclobber and tied to $src1 per the
// gather instruction's merge semantics.
5288 multiclass avx512_gather<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
5289 X86MemOperand memop, PatFrag GatherNode> {
5290 let Constraints = "@earlyclobber $dst, $src1 = $dst, $mask = $mask_wb" in
5291 def rm : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst, _.KRCWM:$mask_wb),
5292 (ins _.RC:$src1, _.KRCWM:$mask, memop:$src2),
5293 !strconcat(OpcodeStr,
5294 "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
5295 [(set _.RC:$dst, _.KRCWM:$mask_wb,
5296 (GatherNode (_.VT _.RC:$src1), _.KRCWM:$mask,
5297 vectoraddr:$src2))]>, EVEX, EVEX_K,
5298 EVEX_CD8<_.EltSize, CD8VT1>;
// 512-bit gather instantiations for FP and integer element types, with
// 32-bit (d) and 64-bit (q) index variants.
5301 let ExeDomain = SSEPackedDouble in {
5302 defm VGATHERDPDZ : avx512_gather<0x92, "vgatherdpd", v8f64_info, vy64xmem,
5303 mgatherv8i32>, EVEX_V512, VEX_W;
5304 defm VGATHERQPDZ : avx512_gather<0x93, "vgatherqpd", v8f64_info, vz64mem,
5305 mgatherv8i64>, EVEX_V512, VEX_W;
5308 let ExeDomain = SSEPackedSingle in {
5309 defm VGATHERDPSZ : avx512_gather<0x92, "vgatherdps", v16f32_info, vz32mem,
5310 mgatherv16i32>, EVEX_V512;
5311 defm VGATHERQPSZ : avx512_gather<0x93, "vgatherqps", v8f32x_info, vz64mem,
5312 mgatherv8i64>, EVEX_V512;
5315 defm VPGATHERDQZ : avx512_gather<0x90, "vpgatherdq", v8i64_info, vy64xmem,
5316 mgatherv8i32>, EVEX_V512, VEX_W;
5317 defm VPGATHERDDZ : avx512_gather<0x90, "vpgatherdd", v16i32_info, vz32mem,
5318 mgatherv16i32>, EVEX_V512;
5320 defm VPGATHERQQZ : avx512_gather<0x91, "vpgatherqq", v8i64_info, vz64mem,
5321 mgatherv8i64>, EVEX_V512, VEX_W;
5322 defm VPGATHERQDZ : avx512_gather<0x91, "vpgatherqd", v8i32x_info, vz64mem,
5323 mgatherv8i64>, EVEX_V512;
// Masked scatter: store-only (mayStore), mask written back via $mask_wb.
5325 multiclass avx512_scatter<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
5326 X86MemOperand memop, PatFrag ScatterNode> {
5328 let mayStore = 1, Constraints = "$mask = $mask_wb" in
5330 def mr : AVX5128I<opc, MRMDestMem, (outs _.KRCWM:$mask_wb),
5331 (ins memop:$dst, _.KRCWM:$mask, _.RC:$src),
5332 !strconcat(OpcodeStr,
5333 "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
5334 [(set _.KRCWM:$mask_wb, (ScatterNode (_.VT _.RC:$src),
5335 _.KRCWM:$mask, vectoraddr:$dst))]>,
5336 EVEX, EVEX_K, EVEX_CD8<_.EltSize, CD8VT1>;
// 512-bit scatter instantiations, mirroring the gather set above.
5339 let ExeDomain = SSEPackedDouble in {
5340 defm VSCATTERDPDZ : avx512_scatter<0xA2, "vscatterdpd", v8f64_info, vy64xmem,
5341 mscatterv8i32>, EVEX_V512, VEX_W;
5342 defm VSCATTERQPDZ : avx512_scatter<0xA3, "vscatterqpd", v8f64_info, vz64mem,
5343 mscatterv8i64>, EVEX_V512, VEX_W;
5346 let ExeDomain = SSEPackedSingle in {
5347 defm VSCATTERDPSZ : avx512_scatter<0xA2, "vscatterdps", v16f32_info, vz32mem,
5348 mscatterv16i32>, EVEX_V512;
5349 defm VSCATTERQPSZ : avx512_scatter<0xA3, "vscatterqps", v8f32x_info, vz64mem,
5350 mscatterv8i64>, EVEX_V512;
5353 defm VPSCATTERDQZ : avx512_scatter<0xA0, "vpscatterdq", v8i64_info, vy64xmem,
5354 mscatterv8i32>, EVEX_V512, VEX_W;
5355 defm VPSCATTERDDZ : avx512_scatter<0xA0, "vpscatterdd", v16i32_info, vz32mem,
5356 mscatterv16i32>, EVEX_V512;
5358 defm VPSCATTERQQZ : avx512_scatter<0xA1, "vpscatterqq", v8i64_info, vz64mem,
5359 mscatterv8i64>, EVEX_V512, VEX_W;
5360 defm VPSCATTERQDZ : avx512_scatter<0xA1, "vpscatterqd", v8i32x_info, vz64mem,
5361 mscatterv8i64>, EVEX_V512;
// Gather/scatter prefetch hints (AVX-512 PF feature). No outputs; the
// opcode-extension Format (MRM1m/MRM2m/MRM5m/MRM6m) selects hint level
// (0/1) and gather vs. scatter.
5364 multiclass avx512_gather_scatter_prefetch<bits<8> opc, Format F, string OpcodeStr,
5365 RegisterClass KRC, X86MemOperand memop> {
5366 let Predicates = [HasPFI], hasSideEffects = 1 in
5367 def m : AVX5128I<opc, F, (outs), (ins KRC:$mask, memop:$src),
5368 !strconcat(OpcodeStr, "\t{$src {${mask}}|{${mask}}, $src}"),
5372 defm VGATHERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dps",
5373 VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
5375 defm VGATHERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qps",
5376 VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
5378 defm VGATHERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dpd",
5379 VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
5381 defm VGATHERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qpd",
5382 VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
5384 defm VGATHERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dps",
5385 VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
5387 defm VGATHERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qps",
5388 VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
5390 defm VGATHERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dpd",
5391 VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
5393 defm VGATHERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qpd",
5394 VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
5396 defm VSCATTERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dps",
5397 VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
5399 defm VSCATTERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qps",
5400 VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
5402 defm VSCATTERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dpd",
5403 VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
5405 defm VSCATTERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qpd",
5406 VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
5408 defm VSCATTERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dps",
5409 VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
5411 defm VSCATTERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qps",
5412 VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
5414 defm VSCATTERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dpd",
5415 VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
5417 defm VSCATTERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qpd",
5418 VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
5419 //===----------------------------------------------------------------------===//
5420 // VSHUFPS - VSHUFPD Operations
// Emits the two forms of a 512-bit VSHUFPS/VSHUFPD shuffle:
//   rmi - reg, mem, imm8 (second source loaded via mem_frag)
//   rri - reg, reg, imm8
// Both select the X86Shufp node with an i8 immediate shuffle control.
// NOTE(review): the parameter list continues on a line not visible here;
// it supplies the execution-domain argument 'd' used in both defs.
multiclass avx512_shufp<RegisterClass RC, X86MemOperand x86memop,
                        ValueType vt, string OpcodeStr, PatFrag mem_frag,
  // Register/memory form.
  def rmi : AVX512PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, x86memop:$src2, u8imm:$src3),
                   !strconcat(OpcodeStr,
                       "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                   [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
                                       (i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
                   EVEX_4V, Sched<[WriteShuffleLd, ReadAfterLd]>;
  // Register/register form.
  def rri : AVX512PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, u8imm:$src3),
                   !strconcat(OpcodeStr,
                       "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                   [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
                                       (i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
                   EVEX_4V, Sched<[WriteShuffle]>;
// 512-bit single- and double-precision shuffle instantiations.
defm VSHUFPSZ : avx512_shufp<VR512, f512mem, v16f32, "vshufps", loadv16f32,
                  SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VSHUFPDZ : avx512_shufp<VR512, f512mem, v8f64, "vshufpd", loadv8f64,
                  SSEPackedDouble>, PD, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;

// Integer shuffles of the same element widths reuse the FP shuffle
// instructions; no separate integer opcode is needed.
def : Pat<(v16i32 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))),
          (VSHUFPSZrri VR512:$src1, VR512:$src2, imm:$imm)>;
def : Pat<(v16i32 (X86Shufp VR512:$src1,
                    (loadv16i32 addr:$src2), (i8 imm:$imm))),
          (VSHUFPSZrmi VR512:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v8i64 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))),
          (VSHUFPDZrri VR512:$src1, VR512:$src2, imm:$imm)>;
def : Pat<(v8i64 (X86Shufp VR512:$src1,
                   (loadv8i64 addr:$src2), (i8 imm:$imm))),
          (VSHUFPDZrmi VR512:$src1, addr:$src2, imm:$imm)>;
// VALIGND/VALIGNQ: concatenate two vectors and extract a window of elements
// starting at the position given by the immediate.
multiclass avx512_valign<X86VectorVTInfo _> {
  // Register form with masking support.  The sources are deliberately
  // swapped when building the X86VAlign node: the instruction's $src1 is
  // the high half of the concatenation.
  defm rri : AVX512_maskable<0x03, MRMSrcReg, _, (outs _.RC:$dst),
                     (ins _.RC:$src1, _.RC:$src2, u8imm:$src3),
                     "$src3, $src2, $src1", "$src1, $src2, $src3",
                     (_.VT (X86VAlign _.RC:$src2, _.RC:$src1,
                  AVX512AIi8Base, EVEX_4V;

  // Also match valign of packed floats; the same operand swap applies
  // when selecting the instruction.
  def : Pat<(_.FloatVT (X86VAlign _.RC:$src1, _.RC:$src2, (i8 imm:$imm))),
            (!cast<Instruction>(NAME##rri) _.RC:$src2, _.RC:$src1, imm:$imm)>;

  // Memory form: second source comes from memory.
  def rmi : AVX512AIi8<0x03, MRMSrcMem, (outs _.RC:$dst),
         (ins _.RC:$src1, _.MemOp:$src2, u8imm:$src3),
         !strconcat("valign"##_.Suffix,
                    "\t{$src3, $src2, $src1, $dst|"
                        "$dst, $src1, $src2, $src3}"),
defm VALIGND : avx512_valign<v16i32_info>, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VALIGNQ : avx512_valign<v8i64_info>, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;

// Helper fragments to match sext vXi1 to vXiY: an arithmetic shift right by
// (element size - 1) splats the sign bit, which is how a sign-extended
// vXi1 mask appears after legalization.
def v16i1sextv16i32 : PatLeaf<(v16i32 (X86vsrai VR512:$src, (i8 31)))>;
def v8i1sextv8i64 : PatLeaf<(v8i64 (X86vsrai VR512:$src, (i8 63)))>;
// VPABS (packed absolute value) instruction forms: plain (rr/rm), merge-
// masked (rrk/rmk/rmbk), zero-masked (rrkz/rmkz/rmbkz), with full-vector
// memory (rm*) and broadcast scalar-memory (rmb*) source variants.
// FIX: the memory forms previously hard-coded VR512 as the destination
// register class while the register forms used the RC template parameter;
// all forms now use RC.  Behavior is unchanged for the existing VR512
// instantiations below.
multiclass avx512_vpabs<bits<8> opc, string OpcodeStr, ValueType OpVT,
                        RegisterClass KRC, RegisterClass RC,
                        X86MemOperand x86memop, X86MemOperand x86scalar_mop,
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
           !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
  def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
           !strconcat(OpcodeStr, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
  def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
            !strconcat(OpcodeStr,
                       "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
  let mayLoad = 1 in {
    def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
             (ins x86memop:$src),
             !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
    def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
              (ins KRC:$mask, x86memop:$src),
              !strconcat(OpcodeStr,
                         "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
    def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
               (ins KRC:$mask, x86memop:$src),
               !strconcat(OpcodeStr,
                          "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
    // Broadcast forms: the scalar-memory operand is splat to all elements,
    // which the assembly string advertises with the BrdcstStr suffix
    // (e.g. "{1to16}").
    def rmb : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
              (ins x86scalar_mop:$src),
              !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
                         ", $dst|$dst, ${src}", BrdcstStr, "}"),
    def rmbk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
               (ins KRC:$mask, x86scalar_mop:$src),
               !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
                          ", $dst {${mask}}|$dst {${mask}}, ${src}", BrdcstStr, "}"),
               []>, EVEX, EVEX_B, EVEX_K;
    def rmbkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                (ins KRC:$mask, x86scalar_mop:$src),
                !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
                           ", $dst {${mask}} {z}|$dst {${mask}} {z}, ${src}",
                []>, EVEX, EVEX_B, EVEX_KZ;
// 512-bit instantiations: dword (16 x i32) and qword (8 x i64) absolute
// value, with {1to16}/{1to8} broadcast suffixes for the scalar-memory forms.
defm VPABSDZ : avx512_vpabs<0x1E, "vpabsd", v16i32, VK16WM, VR512,
                            i512mem, i32mem, "{1to16}">, EVEX_V512,
                            EVEX_CD8<32, CD8VF>;
defm VPABSQZ : avx512_vpabs<0x1F, "vpabsq", v8i64, VK8WM, VR512,
                            i512mem, i64mem, "{1to8}">, EVEX_V512, VEX_W,
                            EVEX_CD8<64, CD8VF>;
// Fold the sign-splat absolute-value idiom (src combined with the
// v*i1sext sign mask and src + mask) into a single VPABS.
// NOTE(review): the def heads of the first two patterns are on preceding
// lines not visible here.
                (bc_v16i32 (v16i1sextv16i32)),
                (bc_v16i32 (add (v16i32 VR512:$src), (v16i1sextv16i32)))),
          (VPABSDZrr VR512:$src)>;
                (bc_v8i64 (v8i1sextv8i64)),
                (bc_v8i64 (add (v8i64 VR512:$src), (v8i1sextv8i64)))),
          (VPABSQZrr VR512:$src)>;

// Unmasked intrinsic calls (zero pass-through, all-ones mask) select the
// plain register form.
def : Pat<(v16i32 (int_x86_avx512_mask_pabs_d_512 (v16i32 VR512:$src),
                   (v16i32 immAllZerosV), (i16 -1))),
          (VPABSDZrr VR512:$src)>;
def : Pat<(v8i64 (int_x86_avx512_mask_pabs_q_512 (v8i64 VR512:$src),
                  (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
          (VPABSQZrr VR512:$src)>;
// Shared template for VPCONFLICT/VPLZCNT-style unary CDI operations:
// unmasked, zero-masked ({z}) and merge-masked forms over register,
// full-vector memory, and broadcast scalar-memory operands.
// FIX: removed a stray space before '|' in the rr form's assembly string
// ("${dst} |"), which printed a trailing blank after the AT&T operand list;
// all other forms already had no space.
multiclass avx512_conflict<bits<8> opc, string OpcodeStr,
                           RegisterClass RC, RegisterClass KRC,
                           X86MemOperand x86memop,
                           X86MemOperand x86scalar_mop, string BrdcstStr> {
 let hasSideEffects = 0 in {
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
           !strconcat(OpcodeStr, "\t{$src, ${dst}|${dst}, $src}"),
  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
           (ins x86memop:$src),
           !strconcat(OpcodeStr, "\t{$src, ${dst}|${dst}, $src}"),
  def rmb : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
            (ins x86scalar_mop:$src),
            !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
                       ", ${dst}|${dst}, ${src}", BrdcstStr, "}"),
  // Zero-masked forms: elements whose mask bit is clear are zeroed.
  def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
             (ins KRC:$mask, RC:$src),
             !strconcat(OpcodeStr,
                        "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
  def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
             (ins KRC:$mask, x86memop:$src),
             !strconcat(OpcodeStr,
                        "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
  def rmbkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
              (ins KRC:$mask, x86scalar_mop:$src),
              !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
                         ", ${dst} {${mask}} {z}|${dst} {${mask}} {z}, ${src}",
              []>, EVEX, EVEX_KZ, EVEX_B;

  // Merge-masked forms tie the pass-through operand ($src1) to the
  // destination register.
  let Constraints = "$src1 = $dst" in {
  def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
            (ins RC:$src1, KRC:$mask, RC:$src2),
            !strconcat(OpcodeStr,
                       "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
  def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
            (ins RC:$src1, KRC:$mask, x86memop:$src2),
            !strconcat(OpcodeStr,
                       "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
  def rmbk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, KRC:$mask, x86scalar_mop:$src2),
             !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
                        ", ${dst} {${mask}}|${dst} {${mask}}, ${src2}", BrdcstStr, "}"),
             []>, EVEX, EVEX_K, EVEX_B;
let Predicates = [HasCDI] in {
// VPCONFLICTD/Q: conflict detection (CDI extension).
defm VPCONFLICTD : avx512_conflict<0xC4, "vpconflictd", VR512, VK16WM,
                                   i512mem, i32mem, "{1to16}">,
                   EVEX_V512, EVEX_CD8<32, CD8VF>;

defm VPCONFLICTQ : avx512_conflict<0xC4, "vpconflictq", VR512, VK8WM,
                                   i512mem, i64mem, "{1to8}">,
                   EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;

// Masked intrinsic patterns: move the GPR mask into a mask register and
// select the merge-masked register form.
def : Pat<(int_x86_avx512_mask_conflict_d_512 VR512:$src2, VR512:$src1,
          (VPCONFLICTDrrk VR512:$src1,
           (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), VR512:$src2)>;

def : Pat<(int_x86_avx512_mask_conflict_q_512 VR512:$src2, VR512:$src1,
          (VPCONFLICTQrrk VR512:$src1,
           (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), VR512:$src2)>;
let Predicates = [HasCDI] in {
// VPLZCNTD/Q: per-element leading-zero count (CDI extension).
defm VPLZCNTD : avx512_conflict<0x44, "vplzcntd", VR512, VK16WM,
                                i512mem, i32mem, "{1to16}">,
                EVEX_V512, EVEX_CD8<32, CD8VF>;

defm VPLZCNTQ : avx512_conflict<0x44, "vplzcntq", VR512, VK8WM,
                                i512mem, i64mem, "{1to8}">,
                EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;

// Masked intrinsic patterns: move the GPR mask into a mask register and
// select the merge-masked register form.
def : Pat<(int_x86_avx512_mask_lzcnt_d_512 VR512:$src2, VR512:$src1,
          (VPLZCNTDrrk VR512:$src1,
           (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), VR512:$src2)>;

def : Pat<(int_x86_avx512_mask_lzcnt_q_512 VR512:$src2, VR512:$src1,
          (VPLZCNTQrrk VR512:$src1,
           (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), VR512:$src2)>;

// The generic ctlz node also selects to VPLZCNT.
def : Pat<(v16i32 (ctlz (loadv16i32 addr:$src))),
          (VPLZCNTDrm addr:$src)>;
def : Pat<(v16i32 (ctlz (v16i32 VR512:$src))),
          (VPLZCNTDrr VR512:$src)>;
def : Pat<(v8i64 (ctlz (loadv8i64 addr:$src))),
          (VPLZCNTQrm addr:$src)>;
def : Pat<(v8i64 (ctlz (v8i64 VR512:$src))),
          (VPLZCNTQrr VR512:$src)>;
// Stores of constant i1: true may be represented as -1 (all-ones) or 1;
// both are stored as byte value 1, false as byte value 0.
def : Pat<(store (i1 -1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
def : Pat<(store (i1 1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
def : Pat<(store (i1 0), addr:$dst), (MOV8mi addr:$dst, (i8 0))>;

// Without DQI there is no byte-sized mask store, so spill VK1/VK8 through
// a GPR: widen to VK16, move to a GPR with KMOVWrk, store the low byte.
def : Pat<(store VK1:$src, addr:$dst),
           (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)),
            sub_8bit))>, Requires<[HasAVX512, NoDQI]>;

def : Pat<(store VK8:$src, addr:$dst),
           (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
            sub_8bit))>, Requires<[HasAVX512, NoDQI]>;

// Truncating store whose memory type is exactly i1.
def truncstorei1 : PatFrag<(ops node:$val, node:$ptr),
                           (truncstore node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;

// An i1 truncating store from a GR8 is just a plain byte store.
def : Pat<(truncstorei1 GR8:$src, addr:$dst),
          (MOV8mr addr:$dst, GR8:$src)>;
// Emit one mask->vector instruction (vpmovm2*) for a given vector type:
// each destination element is the sign-extension of its mask bit
// (matched as X86vsext of the mask register).
multiclass cvt_by_vec_width<bits<8> opc, X86VectorVTInfo Vec, string OpcodeStr > {
def rr : AVX512XS8I<opc, MRMSrcReg, (outs Vec.RC:$dst), (ins Vec.KRC:$src),
                  !strconcat(OpcodeStr##Vec.Suffix, "\t{$src, $dst|$dst, $src}"),
                  [(set Vec.RC:$dst, (Vec.VT (X86vsext Vec.KRC:$src)))]>, EVEX;

// Instantiate the 512-bit form under the feature predicate alone, and the
// 128/256-bit forms additionally under VLX.
multiclass cvt_mask_by_elt_width<bits<8> opc, AVX512VLVectorVTInfo VTInfo,
                                 string OpcodeStr, Predicate prd> {
let Predicates = [prd] in
  defm Z : cvt_by_vec_width<opc, VTInfo.info512, OpcodeStr>, EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : cvt_by_vec_width<opc, VTInfo.info256, OpcodeStr>, EVEX_V256;
    defm Z128 : cvt_by_vec_width<opc, VTInfo.info128, OpcodeStr>, EVEX_V128;

// Byte/word variants use opcode 0x28, dword/qword variants 0x38.
multiclass avx512_convert_mask_to_vector<string OpcodeStr> {
  defm NAME##B : cvt_mask_by_elt_width<0x28, avx512vl_i8_info, OpcodeStr,
  defm NAME##W : cvt_mask_by_elt_width<0x28, avx512vl_i16_info, OpcodeStr,
  defm NAME##D : cvt_mask_by_elt_width<0x38, avx512vl_i32_info, OpcodeStr,
  defm NAME##Q : cvt_mask_by_elt_width<0x38, avx512vl_i64_info, OpcodeStr,

defm VPMOVM2 : avx512_convert_mask_to_vector<"vpmovm2">;
// Emit one vector->mask instruction (vpmov*2m) for a given vector type,
// matched as a truncation of the vector into the mask register class.
multiclass convert_vector_to_mask_common<bits<8> opc, X86VectorVTInfo _, string OpcodeStr > {
def rr : AVX512XS8I<opc, MRMSrcReg, (outs _.KRC:$dst), (ins _.RC:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set _.KRC:$dst, (trunc (_.VT _.RC:$src)))]>, EVEX;

// 512-bit form under the base predicate; 128/256-bit forms also need VLX.
multiclass avx512_convert_vector_to_mask<bits<8> opc, string OpcodeStr,
                                         AVX512VLVectorVTInfo VTInfo, Predicate prd> {
let Predicates = [prd] in
  defm Z : convert_vector_to_mask_common <opc, VTInfo.info512, OpcodeStr>,

  let Predicates = [prd, HasVLX] in {
    defm Z256 : convert_vector_to_mask_common<opc, VTInfo.info256, OpcodeStr>,
    defm Z128 : convert_vector_to_mask_common<opc, VTInfo.info128, OpcodeStr>,

// Byte/word variants (opcode 0x29) require BWI; dword/qword variants
// (opcode 0x39) require DQI.
defm VPMOVB2M : avx512_convert_vector_to_mask<0x29, "vpmovb2m",
                                              avx512vl_i8_info, HasBWI>;
defm VPMOVW2M : avx512_convert_vector_to_mask<0x29, "vpmovw2m",
                                              avx512vl_i16_info, HasBWI>, VEX_W;
defm VPMOVD2M : avx512_convert_vector_to_mask<0x39, "vpmovd2m",
                                              avx512vl_i32_info, HasDQI>;
defm VPMOVQ2M : avx512_convert_vector_to_mask<0x39, "vpmovq2m",
                                              avx512vl_i64_info, HasDQI>, VEX_W;
5752 //===----------------------------------------------------------------------===//
5753 // AVX-512 - COMPRESS and EXPAND
// One vector width of VCOMPRESS/VPCOMPRESS: zero-masked (rrkz) and
// merge-masked (rrk) register forms plus a masked store form (mrk).
// FIX: removed the stray space before '|' in the rrk and mrk assembly
// strings ("{${mask}} |"), which printed a trailing blank after the AT&T
// operand list; the rrkz string already had no space.
multiclass compress_by_vec_width<bits<8> opc, X86VectorVTInfo _,
  def rrkz : AVX5128I<opc, MRMDestReg, (outs _.RC:$dst),
      (ins _.KRCWM:$mask, _.RC:$src),
      OpcodeStr # "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}",
      [(set _.RC:$dst, (_.VT (X86compress _.KRCWM:$mask, _.RC:$src,
                              _.ImmAllZerosV)))]>, EVEX_KZ;

  // Merge-masked form ties the pass-through operand to the destination.
  let Constraints = "$src0 = $dst" in
  def rrk : AVX5128I<opc, MRMDestReg, (outs _.RC:$dst),
      (ins _.RC:$src0, _.KRCWM:$mask, _.RC:$src),
      OpcodeStr # "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}",
      [(set _.RC:$dst, (_.VT (X86compress _.KRCWM:$mask, _.RC:$src,
                              _.RC:$src0)))]>, EVEX_K;

  // Masked compress-to-memory form; the unused pass-through is undef.
  let mayStore = 1 in {
  def mrk : AVX5128I<opc, MRMDestMem, (outs),
      (ins _.MemOp:$dst, _.KRCWM:$mask, _.RC:$src),
      OpcodeStr # "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}",
      [(store (_.VT (X86compress _.KRCWM:$mask, _.RC:$src, undef)),
      EVEX_K, EVEX_CD8<_.EltSize, CD8VT1>;
// Instantiate the 512-bit compress form unconditionally and the
// 128/256-bit forms under VLX.
multiclass compress_by_elt_width<bits<8> opc, string OpcodeStr,
                                 AVX512VLVectorVTInfo VTInfo> {
  defm Z : compress_by_vec_width<opc, VTInfo.info512, OpcodeStr>, EVEX_V512;

  let Predicates = [HasVLX] in {
    defm Z256 : compress_by_vec_width<opc, VTInfo.info256, OpcodeStr>, EVEX_V256;
    defm Z128 : compress_by_vec_width<opc, VTInfo.info128, OpcodeStr>, EVEX_V128;

// Integer compress uses opcode 0x8B, FP compress 0x8A.
defm VPCOMPRESSD : compress_by_elt_width <0x8B, "vpcompressd", avx512vl_i32_info>,
defm VPCOMPRESSQ : compress_by_elt_width <0x8B, "vpcompressq", avx512vl_i64_info>,
defm VCOMPRESSPS : compress_by_elt_width <0x8A, "vcompressps", avx512vl_f32_info>,
defm VCOMPRESSPD : compress_by_elt_width <0x8A, "vcompresspd", avx512vl_f64_info>,
// One vector width of VEXPAND/VPEXPAND: zero-masked (rrkz/rmkz) and
// merge-masked (rrk/rmk) forms over register and memory sources.
// FIX: removed the stray space before '|' in the rrk and rmk assembly
// strings ("{${mask}} |"), which printed a trailing blank after the AT&T
// operand list; the {z} forms already had no space.
multiclass expand_by_vec_width<bits<8> opc, X86VectorVTInfo _,
  def rrkz : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
      (ins _.KRCWM:$mask, _.RC:$src),
      OpcodeStr # "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}",
      [(set _.RC:$dst, (_.VT (X86expand _.KRCWM:$mask, (_.VT _.RC:$src),
                              _.ImmAllZerosV)))]>, EVEX_KZ;

  // Merge-masked register form ties the pass-through to the destination.
  let Constraints = "$src0 = $dst" in
  def rrk : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
      (ins _.RC:$src0, _.KRCWM:$mask, _.RC:$src),
      OpcodeStr # "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}",
      [(set _.RC:$dst, (_.VT (X86expand _.KRCWM:$mask,
                              (_.VT _.RC:$src), _.RC:$src0)))]>, EVEX_K;

  // Merge-masked expand-from-memory form.
  let mayLoad = 1, Constraints = "$src0 = $dst" in
  def rmk : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
      (ins _.RC:$src0, _.KRCWM:$mask, _.MemOp:$src),
      OpcodeStr # "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}",
      [(set _.RC:$dst, (_.VT (X86expand _.KRCWM:$mask,
                              (_.LdFrag addr:$src))),
      EVEX_K, EVEX_CD8<_.EltSize, CD8VT1>;

  // Zero-masked expand-from-memory form.
  def rmkz : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
      (ins _.KRCWM:$mask, _.MemOp:$src),
      OpcodeStr # "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}",
      [(set _.RC:$dst, (_.VT (X86expand _.KRCWM:$mask,
                              (_.VT (bitconvert (_.LdFrag addr:$src))),
                              _.ImmAllZerosV)))]>,
      EVEX_KZ, EVEX_CD8<_.EltSize, CD8VT1>;
// Instantiate the 512-bit expand form unconditionally and the 128/256-bit
// forms under VLX.
multiclass expand_by_elt_width<bits<8> opc, string OpcodeStr,
                               AVX512VLVectorVTInfo VTInfo> {
  defm Z : expand_by_vec_width<opc, VTInfo.info512, OpcodeStr>, EVEX_V512;

  let Predicates = [HasVLX] in {
    defm Z256 : expand_by_vec_width<opc, VTInfo.info256, OpcodeStr>, EVEX_V256;
    defm Z128 : expand_by_vec_width<opc, VTInfo.info128, OpcodeStr>, EVEX_V128;

// Integer expand uses opcode 0x89, FP expand 0x88.
defm VPEXPANDD : expand_by_elt_width <0x89, "vpexpandd", avx512vl_i32_info>,
defm VPEXPANDQ : expand_by_elt_width <0x89, "vpexpandq", avx512vl_i64_info>,
defm VEXPANDPS : expand_by_elt_width <0x88, "vexpandps", avx512vl_f32_info>,
defm VEXPANDPD : expand_by_elt_width <0x88, "vexpandpd", avx512vl_f64_info>,