// Group template arguments that can be derived from the vector type (EltNum x
// EltVT). These are things like the register class for the writemask, etc.
// The idea is to pass one of these as the template argument rather than the
// individual arguments.
class X86VectorVTInfo<int numelts, ValueType EltVT, RegisterClass rc,
                      string suffix = ""> {
  RegisterClass RC = rc;
  int NumElts = numelts;

  // Corresponding mask register class.
  RegisterClass KRC = !cast<RegisterClass>("VK" # NumElts);

  // Corresponding write-mask register class.
  RegisterClass KRCWM = !cast<RegisterClass>("VK" # NumElts # "WM");

  // The GPR register class that can hold the write mask. Use GR8 for fewer
  // than 8 elements. Use shift-right and equal to work around the lack of
  // !lt in tablegen.
  RegisterClass MRC =
    !cast<RegisterClass>("GR" #
                         !if (!eq (!srl(NumElts, 3), 0), 8, NumElts));
  // Suffix used in the instruction mnemonic.
  string Suffix = suffix;

  string VTName = "v" # NumElts # EltVT;

  // The vector VT.
  ValueType VT = !cast<ValueType>(VTName);

  string EltTypeName = !cast<string>(EltVT);
  // Size of the element type in bits, e.g. 32 for v16i32.
  string EltSizeName = !subst("i", "", !subst("f", "", EltTypeName));
  int EltSize = EltVT.Size;

  // "i" for integer types and "f" for floating-point types.
  string TypeVariantName = !subst(EltSizeName, "", EltTypeName);

  // Size of RC in bits, e.g. 512 for VR512.
  int Size = RC.Size;

  // The corresponding memory operand, e.g. i512mem for VR512.
  X86MemOperand MemOp = !cast<X86MemOperand>(TypeVariantName # Size # "mem");
  X86MemOperand ScalarMemOp = !cast<X86MemOperand>(EltVT # "mem");
  // Note: For 128/256-bit integer VT we choose loadv2i64/loadv4i64
  // due to load promotion during legalization.
  PatFrag LdFrag = !cast<PatFrag>("load" #
                                  !if (!eq (TypeVariantName, "i"),
                                       !if (!eq (Size, 128), "v2i64",
                                       !if (!eq (Size, 256), "v4i64",
                                            VTName)), VTName));

  PatFrag ScalarLdFrag = !cast<PatFrag>("load" # EltVT);

  // Load patterns used for memory operands. We only have this defined for
  // floating-point types, i64 element types, and v16i32; for the remaining
  // sub-512-bit integer vectors, keep MemOpFrag undefined for now.
  PatFrag MemOpFrag =
    !if (!eq (TypeVariantName, "f"), !cast<PatFrag>("memop" # VTName),
    !if (!eq (EltTypeName, "i64"), !cast<PatFrag>("memop" # VTName),
    !if (!eq (VTName, "v16i32"), !cast<PatFrag>("memop" # VTName), ?)));

  // The corresponding float type, e.g. v16f32 for v16i32.
  // Note: For EltSize < 32, FloatVT is illegal and TableGen
  // fails to compile, so we choose FloatVT = VT.
  ValueType FloatVT = !cast<ValueType>(
                        !if (!eq (!srl(EltSize, 5), 0),
                             VTName,
                             !if (!eq (TypeVariantName, "i"),
                                  "v" # NumElts # "f" # EltSize,
                                  VTName)));
  // The string to specify embedded broadcast in assembly.
  string BroadcastStr = "{1to" # NumElts # "}";

  // 8-bit compressed displacement tuple/subvector format. This is only
  // defined for NumElts <= 8.
  CD8VForm CD8TupleForm = !if (!eq (!srl(NumElts, 4), 0),
                               !cast<CD8VForm>("CD8VT" # NumElts), ?);

  SubRegIndex SubRegIdx = !if (!eq (Size, 128), sub_xmm,
                          !if (!eq (Size, 256), sub_ymm, ?));
  Domain ExeDomain = !if (!eq (EltTypeName, "f32"), SSEPackedSingle,
                     !if (!eq (EltTypeName, "f64"), SSEPackedDouble,
                     SSEPackedInt));

  // A vector type of the same width with element type i32. This is used to
  // create the canonical constant zero node ImmAllZerosV.
  ValueType i32VT = !cast<ValueType>("v" # !srl(Size, 5) # "i32");
  dag ImmAllZerosV = (VT (bitconvert (i32VT immAllZerosV)));
}
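
// For example, X86VectorVTInfo<16, i32, VR512, "d"> derives VT = v16i32,
// KRC = VK16, KRCWM = VK16WM, EltSize = 32, MemOp = i512mem and
// BroadcastStr = "{1to16}".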
def v64i8_info  : X86VectorVTInfo<64,  i8, VR512, "b">;
def v32i16_info : X86VectorVTInfo<32, i16, VR512, "w">;
def v16i32_info : X86VectorVTInfo<16, i32, VR512, "d">;
def v8i64_info  : X86VectorVTInfo<8,  i64, VR512, "q">;
def v16f32_info : X86VectorVTInfo<16, f32, VR512, "ps">;
def v8f64_info  : X86VectorVTInfo<8,  f64, VR512, "pd">;

// "x" in v32i8x_info means RC = VR256X
def v32i8x_info  : X86VectorVTInfo<32,  i8, VR256X, "b">;
def v16i16x_info : X86VectorVTInfo<16, i16, VR256X, "w">;
def v8i32x_info  : X86VectorVTInfo<8,  i32, VR256X, "d">;
def v4i64x_info  : X86VectorVTInfo<4,  i64, VR256X, "q">;

def v16i8x_info  : X86VectorVTInfo<16,  i8, VR128X, "b">;
def v8i16x_info  : X86VectorVTInfo<8,  i16, VR128X, "w">;
def v4i32x_info  : X86VectorVTInfo<4,  i32, VR128X, "d">;
def v2i64x_info  : X86VectorVTInfo<2,  i64, VR128X, "q">;
class AVX512VLVectorVTInfo<X86VectorVTInfo i512, X86VectorVTInfo i256,
                           X86VectorVTInfo i128> {
  X86VectorVTInfo info512 = i512;
  X86VectorVTInfo info256 = i256;
  X86VectorVTInfo info128 = i128;
}

def avx512vl_i8_info  : AVX512VLVectorVTInfo<v64i8_info, v32i8x_info,
                                             v16i8x_info>;
def avx512vl_i16_info : AVX512VLVectorVTInfo<v32i16_info, v16i16x_info,
                                             v8i16x_info>;
def avx512vl_i32_info : AVX512VLVectorVTInfo<v16i32_info, v8i32x_info,
                                             v4i32x_info>;
def avx512vl_i64_info : AVX512VLVectorVTInfo<v8i64_info, v4i64x_info,
                                             v2i64x_info>;
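
// Width-generic multiclasses below take one of these and select the
// per-width descriptor via VTInfo.info512 / VTInfo.info256 / VTInfo.info128.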
// This multiclass generates the masking variants from the non-masking
// variant. It only provides the assembly pieces for the masking variants.
// It assumes custom ISel patterns for masking which can be provided as
// template arguments.
multiclass AVX512_maskable_custom<bits<8> O, Format F,
                                  dag Outs,
                                  dag Ins, dag MaskingIns, dag ZeroMaskingIns,
                                  string OpcodeStr,
                                  string AttSrcAsm, string IntelSrcAsm,
                                  list<dag> Pattern,
                                  list<dag> MaskingPattern,
                                  list<dag> ZeroMaskingPattern,
                                  string MaskingConstraint = "",
                                  InstrItinClass itin = NoItinerary,
                                  bit IsCommutable = 0> {
  let isCommutable = IsCommutable in
    def NAME: AVX512<O, F, Outs, Ins,
                     OpcodeStr#"\t{"#AttSrcAsm#", $dst|"#
                                   "$dst, "#IntelSrcAsm#"}",
                     Pattern, itin>;

  // Prefer over VMOV*rrk Pat<>
  let AddedComplexity = 20 in
    def NAME#k: AVX512<O, F, Outs, MaskingIns,
                       OpcodeStr#"\t{"#AttSrcAsm#", $dst {${mask}}|"#
                                     "$dst {${mask}}, "#IntelSrcAsm#"}",
                       MaskingPattern, itin>,
                EVEX_K {
      // In case of the 3src subclass this is overridden with a let.
      string Constraints = MaskingConstraint;
    }
  let AddedComplexity = 30 in // Prefer over VMOV*rrkz Pat<>
    def NAME#kz: AVX512<O, F, Outs, ZeroMaskingIns,
                        OpcodeStr#"\t{"#AttSrcAsm#", $dst {${mask}} {z}|"#
                                      "$dst {${mask}} {z}, "#IntelSrcAsm#"}",
                        ZeroMaskingPattern, itin>, EVEX_KZ;
}
// Common base class of AVX512_maskable and AVX512_maskable_3src.
multiclass AVX512_maskable_common<bits<8> O, Format F, X86VectorVTInfo _,
                                  dag Outs,
                                  dag Ins, dag MaskingIns, dag ZeroMaskingIns,
                                  string OpcodeStr,
                                  string AttSrcAsm, string IntelSrcAsm,
                                  dag RHS, dag MaskingRHS,
                                  string MaskingConstraint = "",
                                  InstrItinClass itin = NoItinerary,
                                  bit IsCommutable = 0> :
  AVX512_maskable_custom<O, F, Outs, Ins, MaskingIns, ZeroMaskingIns, OpcodeStr,
                         AttSrcAsm, IntelSrcAsm,
                         [(set _.RC:$dst, RHS)],
                         [(set _.RC:$dst, MaskingRHS)],
                         [(set _.RC:$dst,
                               (vselect _.KRCWM:$mask, RHS, _.ImmAllZerosV))],
                         MaskingConstraint, NoItinerary, IsCommutable>;
// This multiclass generates the unconditional/non-masking, the masking and
// the zero-masking variant of the instruction. In the masking case, the
// preserved vector elements come from a new dummy input operand tied to $dst.
multiclass AVX512_maskable<bits<8> O, Format F, X86VectorVTInfo _,
                           dag Outs, dag Ins, string OpcodeStr,
                           string AttSrcAsm, string IntelSrcAsm,
                           dag RHS, InstrItinClass itin = NoItinerary,
                           bit IsCommutable = 0> :
  AVX512_maskable_common<O, F, _, Outs, Ins,
                         !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
                         !con((ins _.KRCWM:$mask), Ins),
                         OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
                         (vselect _.KRCWM:$mask, RHS, _.RC:$src0),
                         "$src0 = $dst", itin, IsCommutable>;
// Similar to AVX512_maskable but in this case one of the source operands
// ($src1) is already tied to $dst so we just use that for the preserved
// vector elements. NOTE that the NonTiedIns (the ins dag) should exclude
// $src1.
multiclass AVX512_maskable_3src<bits<8> O, Format F, X86VectorVTInfo _,
                                dag Outs, dag NonTiedIns, string OpcodeStr,
                                string AttSrcAsm, string IntelSrcAsm,
                                dag RHS> :
  AVX512_maskable_common<O, F, _, Outs,
                         !con((ins _.RC:$src1), NonTiedIns),
                         !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
                         !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
                         OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
                         (vselect _.KRCWM:$mask, RHS, _.RC:$src1)>;
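
// This shape fits three-operand instructions with a tied source, e.g. the
// FMA and vpermi2/vpermt2 families.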
multiclass AVX512_maskable_in_asm<bits<8> O, Format F, X86VectorVTInfo _,
                                  dag Outs, dag Ins,
                                  string OpcodeStr,
                                  string AttSrcAsm, string IntelSrcAsm,
                                  list<dag> Pattern> :
  AVX512_maskable_custom<O, F, Outs, Ins,
                         !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
                         !con((ins _.KRCWM:$mask), Ins),
                         OpcodeStr, AttSrcAsm, IntelSrcAsm, Pattern, [], [],
                         "$src0 = $dst">;
// Bitcasts between 512-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasAVX512] in {
  def : Pat<(v8f64  (bitconvert (v8i64  VR512:$src))), (v8f64  VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v16i32 VR512:$src))), (v8f64  VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v32i16 VR512:$src))), (v8f64  VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v64i8  VR512:$src))), (v8f64  VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v16f32 VR512:$src))), (v8f64  VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v8i64  VR512:$src))), (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v32i16 VR512:$src))), (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v64i8  VR512:$src))), (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v8f64  VR512:$src))), (v16f32 VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v16i32 VR512:$src))), (v8i64  VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v32i16 VR512:$src))), (v8i64  VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v64i8  VR512:$src))), (v8i64  VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v8f64  VR512:$src))), (v8i64  VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v16f32 VR512:$src))), (v8i64  VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v8i64  VR512:$src))), (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v32i16 VR512:$src))), (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v64i8  VR512:$src))), (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v8f64  VR512:$src))), (v16i32 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v8i64  VR512:$src))), (v32i16 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v16i32 VR512:$src))), (v32i16 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v64i8  VR512:$src))), (v32i16 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v8f64  VR512:$src))), (v32i16 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
  def : Pat<(v64i8 (bitconvert (v8i64  VR512:$src))), (v64i8 VR512:$src)>;
  def : Pat<(v64i8 (bitconvert (v16i32 VR512:$src))), (v64i8 VR512:$src)>;
  def : Pat<(v64i8 (bitconvert (v32i16 VR512:$src))), (v64i8 VR512:$src)>;
  def : Pat<(v64i8 (bitconvert (v8f64  VR512:$src))), (v64i8 VR512:$src)>;
  def : Pat<(v64i8 (bitconvert (v16f32 VR512:$src))), (v64i8 VR512:$src)>;

  def : Pat<(v2i64 (bitconvert (v4i32 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128X:$src))), (v2f64 VR128X:$src)>;
  // Bitcasts between 256-bit vector types. Return the original type since
  // no instruction is needed for the conversion.
  def : Pat<(v4f64  (bitconvert (v8f32  VR256X:$src))), (v4f64  VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v8i32  VR256X:$src))), (v4f64  VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v4i64  VR256X:$src))), (v4f64  VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v16i16 VR256X:$src))), (v4f64  VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v32i8  VR256X:$src))), (v4f64  VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v8i32  VR256X:$src))), (v8f32  VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v4i64  VR256X:$src))), (v8f32  VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v4f64  VR256X:$src))), (v8f32  VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v32i8  VR256X:$src))), (v8f32  VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v16i16 VR256X:$src))), (v8f32  VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v8f32  VR256X:$src))), (v4i64  VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v8i32  VR256X:$src))), (v4i64  VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v4f64  VR256X:$src))), (v4i64  VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v32i8  VR256X:$src))), (v4i64  VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v16i16 VR256X:$src))), (v4i64  VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v4f64  VR256X:$src))), (v32i8  VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v4i64  VR256X:$src))), (v32i8  VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v8f32  VR256X:$src))), (v32i8  VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v8i32  VR256X:$src))), (v32i8  VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v16i16 VR256X:$src))), (v32i8  VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v32i8  VR256X:$src))), (v8i32  VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v16i16 VR256X:$src))), (v8i32  VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v8f32  VR256X:$src))), (v8i32  VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v4i64  VR256X:$src))), (v8i32  VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v4f64  VR256X:$src))), (v8i32  VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v8f32  VR256X:$src))), (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v8i32  VR256X:$src))), (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v4i64  VR256X:$src))), (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v4f64  VR256X:$src))), (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v32i8  VR256X:$src))), (v16i16 VR256X:$src)>;
}
// AVX-512: VPXOR instruction writes zero to its upper part, so it's safe to
// use it to build zeros.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, Predicates = [HasAVX512] in {
def AVX512_512_SET0 : I<0, Pseudo, (outs VR512:$dst), (ins), "",
                        [(set VR512:$dst, (v16f32 immAllZerosV))]>;
}

let Predicates = [HasAVX512] in {
  def : Pat<(v8i64 immAllZerosV), (AVX512_512_SET0)>;
  def : Pat<(v16i32 immAllZerosV), (AVX512_512_SET0)>;
  def : Pat<(v8f64 immAllZerosV), (AVX512_512_SET0)>;
}
//===----------------------------------------------------------------------===//
// AVX-512 - VECTOR INSERT
//===----------------------------------------------------------------------===//
multiclass vinsert_for_size_no_alt<int Opcode,
                                   X86VectorVTInfo From, X86VectorVTInfo To,
                                   PatFrag vinsert_insert,
                                   SDNodeXForm INSERT_get_vinsert_imm> {
  let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
    def rr : AVX512AIi8<Opcode, MRMSrcReg, (outs VR512:$dst),
               (ins VR512:$src1, From.RC:$src2, i8imm:$src3),
               "vinsert" # From.EltTypeName # "x" # From.NumElts #
                 "\t{$src3, $src2, $src1, $dst|"
                 "$dst, $src1, $src2, $src3}",
               [(set To.RC:$dst, (vinsert_insert:$src3 (To.VT VR512:$src1),
                                                       (From.VT From.RC:$src2),
                                                       (iPTR imm)))]>,
             EVEX_4V, EVEX_V512;

    let mayLoad = 1 in
    def rm : AVX512AIi8<Opcode, MRMSrcMem, (outs VR512:$dst),
               (ins VR512:$src1, From.MemOp:$src2, i8imm:$src3),
               "vinsert" # From.EltTypeName # "x" # From.NumElts #
                 "\t{$src3, $src2, $src1, $dst|"
                 "$dst, $src1, $src2, $src3}",
               []>,
             EVEX_4V, EVEX_V512, EVEX_CD8<From.EltSize, From.CD8TupleForm>;
  }
}
multiclass vinsert_for_size<int Opcode,
                            X86VectorVTInfo From, X86VectorVTInfo To,
                            X86VectorVTInfo AltFrom, X86VectorVTInfo AltTo,
                            PatFrag vinsert_insert,
                            SDNodeXForm INSERT_get_vinsert_imm> :
  vinsert_for_size_no_alt<Opcode, From, To,
                          vinsert_insert, INSERT_get_vinsert_imm> {
  // Codegen pattern with the alternative types, e.g. v2i64 -> v8i64 for
  // vinserti32x4. Only add this if 64x2 and friends are not supported
  // natively via AVX512DQ.
  let Predicates = [NoDQI] in
    def : Pat<(vinsert_insert:$ins
                 (AltTo.VT VR512:$src1), (AltFrom.VT From.RC:$src2), (iPTR imm)),
              (AltTo.VT (!cast<Instruction>(NAME # From.EltSize # "x4rr")
                         VR512:$src1, From.RC:$src2,
                         (INSERT_get_vinsert_imm VR512:$ins)))>;
}
multiclass vinsert_for_type<ValueType EltVT32, int Opcode128,
                            ValueType EltVT64, int Opcode256> {
  defm NAME # "32x4" : vinsert_for_size<Opcode128,
                                        X86VectorVTInfo< 4, EltVT32, VR128X>,
                                        X86VectorVTInfo<16, EltVT32, VR512>,
                                        X86VectorVTInfo< 2, EltVT64, VR128X>,
                                        X86VectorVTInfo< 8, EltVT64, VR512>,
                                        vinsert128_insert,
                                        INSERT_get_vinsert128_imm>;
  let Predicates = [HasDQI] in
    defm NAME # "64x2" : vinsert_for_size_no_alt<Opcode128,
                                                 X86VectorVTInfo< 2, EltVT64, VR128X>,
                                                 X86VectorVTInfo< 8, EltVT64, VR512>,
                                                 vinsert128_insert,
                                                 INSERT_get_vinsert128_imm>, VEX_W;
  defm NAME # "64x4" : vinsert_for_size<Opcode256,
                                        X86VectorVTInfo< 4, EltVT64, VR256X>,
                                        X86VectorVTInfo< 8, EltVT64, VR512>,
                                        X86VectorVTInfo< 8, EltVT32, VR256>,
                                        X86VectorVTInfo<16, EltVT32, VR512>,
                                        vinsert256_insert,
                                        INSERT_get_vinsert256_imm>, VEX_W;
  let Predicates = [HasDQI] in
    defm NAME # "32x8" : vinsert_for_size_no_alt<Opcode256,
                                                 X86VectorVTInfo< 8, EltVT32, VR256X>,
                                                 X86VectorVTInfo<16, EltVT32, VR512>,
                                                 vinsert256_insert,
                                                 INSERT_get_vinsert256_imm>;
}
defm VINSERTF : vinsert_for_type<f32, 0x18, f64, 0x1a>;
defm VINSERTI : vinsert_for_type<i32, 0x38, i64, 0x3a>;
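
// These expand to instructions such as VINSERTF32x4rr and VINSERTI64x4rm; the
// mnemonic, e.g. "vinsertf32x4", is spliced together from From.EltTypeName
// and From.NumElts above.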
// vinsertps - insert f32 to XMM
def VINSERTPSzrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
      (ins VR128X:$src1, VR128X:$src2, i8imm:$src3),
      "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      [(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
      EVEX_4V;
def VINSERTPSzrm : AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
      (ins VR128X:$src1, f32mem:$src2, i8imm:$src3),
      "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      [(set VR128X:$dst, (X86insertps VR128X:$src1,
                           (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
                           imm:$src3))]>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
//===----------------------------------------------------------------------===//
// AVX-512 VECTOR EXTRACT
//===----------------------------------------------------------------------===//
multiclass vextract_for_size<int Opcode,
                             X86VectorVTInfo From, X86VectorVTInfo To,
                             X86VectorVTInfo AltFrom, X86VectorVTInfo AltTo,
                             PatFrag vextract_extract,
                             SDNodeXForm EXTRACT_get_vextract_imm> {
  let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
    defm rr : AVX512_maskable_in_asm<Opcode, MRMDestReg, To, (outs To.RC:$dst),
                (ins VR512:$src1, i8imm:$idx),
                "vextract" # To.EltTypeName # "x4",
                "$idx, $src1", "$src1, $idx",
                [(set To.RC:$dst, (vextract_extract:$idx (From.VT VR512:$src1),
                                                         (iPTR imm)))]>,
              AVX512AIi8Base, EVEX, EVEX_V512;

    let mayStore = 1 in
    def rm : AVX512AIi8<Opcode, MRMDestMem, (outs),
               (ins To.MemOp:$dst, VR512:$src1, i8imm:$src2),
               "vextract" # To.EltTypeName # "x4\t{$src2, $src1, $dst|"
                 "$dst, $src1, $src2}",
               []>, EVEX, EVEX_V512, EVEX_CD8<To.EltSize, CD8VT4>;
  }

  // Codegen pattern with the alternative types, e.g. v8i64 -> v2i64 for
  // vextracti32x4.
  def : Pat<(vextract_extract:$ext (AltFrom.VT VR512:$src1), (iPTR imm)),
            (AltTo.VT (!cast<Instruction>(NAME # To.EltSize # "x4rr")
                       VR512:$src1,
                       (EXTRACT_get_vextract_imm To.RC:$ext)))>;
  // A 128/256-bit subvector extract from the first 512-bit vector position is
  // a subregister copy that needs no instruction.
  def : Pat<(To.VT (extract_subvector (From.VT VR512:$src), (iPTR 0))),
            (To.VT (EXTRACT_SUBREG (From.VT VR512:$src), To.SubRegIdx))>;

  // And for the alternative types.
  def : Pat<(AltTo.VT (extract_subvector (AltFrom.VT VR512:$src), (iPTR 0))),
            (AltTo.VT (EXTRACT_SUBREG (AltFrom.VT VR512:$src), AltTo.SubRegIdx))>;
  // Intrinsic call with masking.
  def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
                              "x4_512")
              VR512:$src1, (iPTR imm:$idx), To.RC:$src0, GR8:$mask),
            (!cast<Instruction>(NAME # To.EltSize # "x4rrk") To.RC:$src0,
             (v4i1 (COPY_TO_REGCLASS GR8:$mask, VK4WM)),
             VR512:$src1, imm:$idx)>;

  // Intrinsic call with zero-masking.
  def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
                              "x4_512")
              VR512:$src1, (iPTR imm:$idx), To.ImmAllZerosV, GR8:$mask),
            (!cast<Instruction>(NAME # To.EltSize # "x4rrkz")
             (v4i1 (COPY_TO_REGCLASS GR8:$mask, VK4WM)),
             VR512:$src1, imm:$idx)>;

  // Intrinsic call without masking.
  def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
                              "x4_512")
              VR512:$src1, (iPTR imm:$idx), To.ImmAllZerosV, (i8 -1)),
            (!cast<Instruction>(NAME # To.EltSize # "x4rr")
             VR512:$src1, imm:$idx)>;
}
multiclass vextract_for_type<ValueType EltVT32, int Opcode32,
                             ValueType EltVT64, int Opcode64> {
  defm NAME # "32x4" : vextract_for_size<Opcode32,
                                         X86VectorVTInfo<16, EltVT32, VR512>,
                                         X86VectorVTInfo< 4, EltVT32, VR128X>,
                                         X86VectorVTInfo< 8, EltVT64, VR512>,
                                         X86VectorVTInfo< 2, EltVT64, VR128X>,
                                         vextract128_extract,
                                         EXTRACT_get_vextract128_imm>;
  defm NAME # "64x4" : vextract_for_size<Opcode64,
                                         X86VectorVTInfo< 8, EltVT64, VR512>,
                                         X86VectorVTInfo< 4, EltVT64, VR256X>,
                                         X86VectorVTInfo<16, EltVT32, VR512>,
                                         X86VectorVTInfo< 8, EltVT32, VR256>,
                                         vextract256_extract,
                                         EXTRACT_get_vextract256_imm>, VEX_W;
}
defm VEXTRACTF : vextract_for_type<f32, 0x19, f64, 0x1b>;
defm VEXTRACTI : vextract_for_type<i32, 0x39, i64, 0x3b>;
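
// As with the insert multiclasses, these expand to e.g. VEXTRACTF32x4rr and
// VEXTRACTI64x4rm, with the mnemonic derived from To.EltTypeName.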
// A 128-bit subvector insert to the first 512-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
            (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
            sub_ymm)>;
def : Pat<(insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
            (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
            sub_ymm)>;
def : Pat<(insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
            (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
            sub_ymm)>;
def : Pat<(insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
            (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
            sub_ymm)>;
def : Pat<(insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
// vextractps - extract 32 bits from XMM
def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
      (ins VR128X:$src1, i32i8imm:$src2),
      "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
      EVEX;

def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
      (ins f32mem:$dst, VR128X:$src1, i32i8imm:$src2),
      "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
              addr:$dst)]>, EVEX, EVEX_CD8<32, CD8VT1>;
//===---------------------------------------------------------------------===//
// AVX-512 BROADCAST
//===---------------------------------------------------------------------===//

multiclass avx512_fp_broadcast<bits<8> opc, string OpcodeStr,
                               RegisterClass DestRC,
                               RegisterClass SrcRC, X86MemOperand x86memop> {
  def rr : AVX5128I<opc, MRMSrcReg, (outs DestRC:$dst), (ins SrcRC:$src),
                    !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                    []>, EVEX;
  def rm : AVX5128I<opc, MRMSrcMem, (outs DestRC:$dst), (ins x86memop:$src),
                    !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>, EVEX;
}

let ExeDomain = SSEPackedSingle in {
  defm VBROADCASTSSZ : avx512_fp_broadcast<0x18, "vbroadcastss", VR512,
                                           VR128X, f32mem>,
                       EVEX_V512, EVEX_CD8<32, CD8VT1>;
}

let ExeDomain = SSEPackedDouble in {
  defm VBROADCASTSDZ : avx512_fp_broadcast<0x19, "vbroadcastsd", VR512,
                                           VR128X, f64mem>,
                       EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
}
def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))),
          (VBROADCASTSSZrm addr:$src)>;
def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))),
          (VBROADCASTSDZrm addr:$src)>;

def : Pat<(int_x86_avx512_vbroadcast_ss_512 addr:$src),
          (VBROADCASTSSZrm addr:$src)>;
def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
          (VBROADCASTSDZrm addr:$src)>;
multiclass avx512_int_broadcast_reg<bits<8> opc, string OpcodeStr,
                                    RegisterClass SrcRC, RegisterClass KRC> {
  def Zrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins SrcRC:$src),
                     !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                     []>, EVEX, EVEX_V512;
  def Zkrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst),
                      (ins KRC:$mask, SrcRC:$src),
                      !strconcat(OpcodeStr,
                                 " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
                      []>, EVEX, EVEX_V512, EVEX_KZ;
}

defm VPBROADCASTDr : avx512_int_broadcast_reg<0x7C, "vpbroadcastd", GR32, VK16WM>;
defm VPBROADCASTQr : avx512_int_broadcast_reg<0x7C, "vpbroadcastq", GR64, VK8WM>,
                     VEX_W;
def : Pat <(v16i32 (X86vzext VK16WM:$mask)),
           (VPBROADCASTDrZkrr VK16WM:$mask, (i32 (MOV32ri 0x1)))>;

def : Pat <(v8i64 (X86vzext VK8WM:$mask)),
           (VPBROADCASTQrZkrr VK8WM:$mask, (i64 (MOV64ri 0x1)))>;
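
// In other words, each element becomes 1 where the mask bit is set and 0
// elsewhere, which is exactly the zero-extension of the mask vector.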
def : Pat<(v16i32 (X86VBroadcast (i32 GR32:$src))),
          (VPBROADCASTDrZrr GR32:$src)>;
def : Pat<(v16i32 (X86VBroadcastm VK16WM:$mask, (i32 GR32:$src))),
          (VPBROADCASTDrZkrr VK16WM:$mask, GR32:$src)>;
def : Pat<(v8i64 (X86VBroadcast (i64 GR64:$src))),
          (VPBROADCASTQrZrr GR64:$src)>;
def : Pat<(v8i64 (X86VBroadcastm VK8WM:$mask, (i64 GR64:$src))),
          (VPBROADCASTQrZkrr VK8WM:$mask, GR64:$src)>;

def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_i32_512 (i32 GR32:$src))),
          (VPBROADCASTDrZrr GR32:$src)>;
def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_i64_512 (i64 GR64:$src))),
          (VPBROADCASTQrZrr GR64:$src)>;

def : Pat<(v16i32 (int_x86_avx512_mask_pbroadcast_d_gpr_512 (i32 GR32:$src),
                   (v16i32 immAllZerosV), (i16 GR16:$mask))),
          (VPBROADCASTDrZkrr (COPY_TO_REGCLASS GR16:$mask, VK16WM), GR32:$src)>;
def : Pat<(v8i64 (int_x86_avx512_mask_pbroadcast_q_gpr_512 (i64 GR64:$src),
                  (bc_v8i64 (v16i32 immAllZerosV)), (i8 GR8:$mask))),
          (VPBROADCASTQrZkrr (COPY_TO_REGCLASS GR8:$mask, VK8WM), GR64:$src)>;
multiclass avx512_int_broadcast_rm<bits<8> opc, string OpcodeStr,
                                   X86MemOperand x86memop, PatFrag ld_frag,
                                   RegisterClass DstRC, ValueType OpVT, ValueType SrcVT,
                                   RegisterClass KRC> {
  def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins VR128X:$src),
                    !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                    [(set DstRC:$dst,
                      (OpVT (X86VBroadcast (SrcVT VR128X:$src))))]>, EVEX;
  def krr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins KRC:$mask,
                                                         VR128X:$src),
                     !strconcat(OpcodeStr,
                                " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
                     [(set DstRC:$dst,
                       (OpVT (X86VBroadcastm KRC:$mask, (SrcVT VR128X:$src))))]>,
                     EVEX, EVEX_KZ;

  def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
                    !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                    [(set DstRC:$dst,
                      (OpVT (X86VBroadcast (ld_frag addr:$src))))]>, EVEX;
  def krm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins KRC:$mask,
                                                         x86memop:$src),
                     !strconcat(OpcodeStr,
                                " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
                     [(set DstRC:$dst, (OpVT (X86VBroadcastm KRC:$mask,
                                              (ld_frag addr:$src))))]>, EVEX, EVEX_KZ;
}
defm VPBROADCASTDZ : avx512_int_broadcast_rm<0x58, "vpbroadcastd", i32mem,
                                             loadi32, VR512, v16i32, v4i32, VK16WM>,
                     EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VPBROADCASTQZ : avx512_int_broadcast_rm<0x59, "vpbroadcastq", i64mem,
                                             loadi64, VR512, v8i64, v2i64, VK8WM>,
                     EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
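
// e.g. "vpbroadcastd (%rdi), %zmm0 {%k1} {z}" replicates one dword to all 16
// lanes, zeroing lanes whose mask bit is clear.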
multiclass avx512_int_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
                                          X86MemOperand x86memop, PatFrag ld_frag,
                                          RegisterClass KRC> {
  def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins x86memop:$src),
                    !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                    []>, EVEX;
  def krm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins KRC:$mask,
                                                         x86memop:$src),
                     !strconcat(OpcodeStr,
                                " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
                     []>, EVEX, EVEX_KZ;
}

defm VBROADCASTI32X4 : avx512_int_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
                                                      i128mem, loadv2i64, VK16WM>,
                       EVEX_V512, EVEX_CD8<32, CD8VT4>;
defm VBROADCASTI64X4 : avx512_int_subvec_broadcast_rm<0x5b, "vbroadcasti64x4",
                                                      i256mem, loadv4i64, VK16WM>, VEX_W,
                       EVEX_V512, EVEX_CD8<64, CD8VT4>;
def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_512 (v4i32 VR128X:$src))),
          (VPBROADCASTDZrr VR128X:$src)>;
def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_512 (v2i64 VR128X:$src))),
          (VPBROADCASTQZrr VR128X:$src)>;

def : Pat<(v16f32 (X86VBroadcast (v4f32 VR128X:$src))),
          (VBROADCASTSSZrr VR128X:$src)>;
def : Pat<(v8f64 (X86VBroadcast (v2f64 VR128X:$src))),
          (VBROADCASTSDZrr VR128X:$src)>;

def : Pat<(v16f32 (int_x86_avx512_vbroadcast_ss_ps_512 (v4f32 VR128X:$src))),
          (VBROADCASTSSZrr VR128X:$src)>;
def : Pat<(v8f64 (int_x86_avx512_vbroadcast_sd_pd_512 (v2f64 VR128X:$src))),
          (VBROADCASTSDZrr VR128X:$src)>;

// Provide a fallback in case the load node that is used in the patterns above
// is used by additional users, which prevents the pattern selection.
def : Pat<(v16f32 (X86VBroadcast FR32X:$src)),
          (VBROADCASTSSZrr (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
def : Pat<(v8f64 (X86VBroadcast FR64X:$src)),
          (VBROADCASTSDZrr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
let Predicates = [HasAVX512] in {
def : Pat<(v8i32 (X86VBroadcastm (v8i1 VK8WM:$mask), (loadi32 addr:$src))),
          (EXTRACT_SUBREG
            (v16i32 (VPBROADCASTDZkrm (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
                     addr:$src)), sub_ymm)>;
}
//===----------------------------------------------------------------------===//
// AVX-512 BROADCAST MASK TO VECTOR REGISTER
//===----------------------------------------------------------------------===//
multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
                                 RegisterClass KRC> {
  let Predicates = [HasCDI] in
  def Zrr : AVX512XS8I<opc, MRMSrcReg, (outs VR512:$dst), (ins KRC:$src),
                       !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                       []>, EVEX, EVEX_V512;

  let Predicates = [HasCDI, HasVLX] in {
    def Z128rr : AVX512XS8I<opc, MRMSrcReg, (outs VR128:$dst), (ins KRC:$src),
                            !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                            []>, EVEX, EVEX_V128;
    def Z256rr : AVX512XS8I<opc, MRMSrcReg, (outs VR256:$dst), (ins KRC:$src),
                            !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                            []>, EVEX, EVEX_V256;
  }
}

let Predicates = [HasCDI] in {
defm VPBROADCASTMW2D : avx512_mask_broadcast<0x3A, "vpbroadcastmw2d",
                                             VK16>;
defm VPBROADCASTMB2Q : avx512_mask_broadcast<0x2A, "vpbroadcastmb2q",
                                             VK8>, VEX_W;
}
//===----------------------------------------------------------------------===//
// AVX-512 - VPERM
//===----------------------------------------------------------------------===//
// -- immediate form --
multiclass avx512_perm_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           X86VectorVTInfo _> {
  let ExeDomain = _.ExeDomain in {
    def ri : AVX512AIi8<opc, MRMSrcReg, (outs _.RC:$dst),
                        (ins _.RC:$src1, i8imm:$src2),
                        !strconcat(OpcodeStr,
                                   " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                        [(set _.RC:$dst,
                          (_.VT (OpNode _.RC:$src1, (i8 imm:$src2))))]>,
                        EVEX;
    def mi : AVX512AIi8<opc, MRMSrcMem, (outs _.RC:$dst),
                        (ins _.MemOp:$src1, i8imm:$src2),
                        !strconcat(OpcodeStr,
                                   " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                        [(set _.RC:$dst,
                          (_.VT (OpNode (_.MemOpFrag addr:$src1),
                                        (i8 imm:$src2))))]>,
                        EVEX, EVEX_CD8<_.EltSize, CD8VF>;
  }
}
defm VPERMQZ    : avx512_perm_imm<0x00, "vpermq", X86VPermi, v8i64_info>,
                  EVEX_V512, VEX_W;
defm VPERMPDZ   : avx512_perm_imm<0x01, "vpermpd", X86VPermi, v8f64_info>,
                  EVEX_V512, VEX_W;

defm VPERMILPSZ : avx512_perm_imm<0x04, "vpermilps", X86VPermilpi, v16f32_info>,
                  EVEX_V512;
defm VPERMILPDZ : avx512_perm_imm<0x05, "vpermilpd", X86VPermilpi, v8f64_info>,
                  EVEX_V512, VEX_W;

def : Pat<(v16i32 (X86VPermilpi VR512:$src1, (i8 imm:$imm))),
          (VPERMILPSZri VR512:$src1, imm:$imm)>;
def : Pat<(v8i64 (X86VPermilpi VR512:$src1, (i8 imm:$imm))),
          (VPERMILPDZri VR512:$src1, imm:$imm)>;
// -- VPERM - register form --
multiclass avx512_perm<bits<8> opc, string OpcodeStr, RegisterClass RC,
                       PatFrag mem_frag, X86MemOperand x86memop, ValueType OpVT> {
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
                    (ins RC:$src1, RC:$src2),
                    !strconcat(OpcodeStr,
                               " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                      (OpVT (X86VPermv RC:$src1, RC:$src2)))]>, EVEX_4V;

  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                    (ins RC:$src1, x86memop:$src2),
                    !strconcat(OpcodeStr,
                               " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                      (OpVT (X86VPermv RC:$src1, (mem_frag addr:$src2))))]>,
                    EVEX_4V;
}
defm VPERMDZ  : avx512_perm<0x36, "vpermd", VR512, memopv16i32, i512mem,
                            v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMQZ  : avx512_perm<0x36, "vpermq", VR512, memopv8i64, i512mem,
                            v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
let ExeDomain = SSEPackedSingle in
defm VPERMPSZ : avx512_perm<0x16, "vpermps", VR512, memopv16f32, f512mem,
                            v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
let ExeDomain = SSEPackedDouble in
defm VPERMPDZ : avx512_perm<0x16, "vpermpd", VR512, memopv8f64, f512mem,
                            v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// -- VPERM2I - 3 source operands form --
multiclass avx512_perm_3src<bits<8> opc, string OpcodeStr, RegisterClass RC,
                            PatFrag mem_frag, X86MemOperand x86memop,
                            SDNode OpNode, ValueType OpVT, RegisterClass KRC> {
  let Constraints = "$src1 = $dst" in {
    def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                          " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst,
                 (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>,
               EVEX_4V;

    def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
                (ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
                !strconcat(OpcodeStr,
                           " \t{$src3, $src2, $dst {${mask}}|"
                           "$dst {${mask}}, $src2, $src3}"),
                [(set RC:$dst, (OpVT (vselect KRC:$mask,
                                       (OpNode RC:$src1, RC:$src2,
                                               RC:$src3),
                                       RC:$src1)))]>,
                EVEX_4V, EVEX_K;
    let AddedComplexity = 30 in // Prefer over VMOV*rrkz Pat<>
    def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
                 (ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
                 !strconcat(OpcodeStr,
                            " \t{$src3, $src2, $dst {${mask}} {z} |",
                            "$dst {${mask}} {z}, $src2, $src3}"),
                 [(set RC:$dst, (OpVT (vselect KRC:$mask,
                                        (OpNode RC:$src1, RC:$src2,
                                                RC:$src3),
                                        (OpVT (bitconvert
                                                (v16i32 immAllZerosV))))))]>,
                 EVEX_4V, EVEX_KZ;

    def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, x86memop:$src3),
               !strconcat(OpcodeStr,
                          " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst,
                 (OpVT (OpNode RC:$src1, RC:$src2,
                               (mem_frag addr:$src3))))]>, EVEX_4V;
    def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                (ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
                !strconcat(OpcodeStr,
                           " \t{$src3, $src2, $dst {${mask}}|"
                           "$dst {${mask}}, $src2, $src3}"),
                [(set RC:$dst,
                  (OpVT (vselect KRC:$mask,
                                 (OpNode RC:$src1, RC:$src2,
                                         (mem_frag addr:$src3)),
                                 RC:$src1)))]>,
                EVEX_4V, EVEX_K;

    let AddedComplexity = 10 in // Prefer over the rrkz variant
    def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                 (ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
                 !strconcat(OpcodeStr,
                            " \t{$src3, $src2, $dst {${mask}} {z}|"
                            "$dst {${mask}} {z}, $src2, $src3}"),
                 [(set RC:$dst,
                   (OpVT (vselect KRC:$mask,
                                  (OpNode RC:$src1, RC:$src2,
                                          (mem_frag addr:$src3)),
                                  (OpVT (bitconvert
                                          (v16i32 immAllZerosV))))))]>,
                 EVEX_4V, EVEX_KZ;
  }
}
defm VPERMI2D  : avx512_perm_3src<0x76, "vpermi2d", VR512, memopv16i32,
                                  i512mem, X86VPermiv3, v16i32, VK16WM>,
                 EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMI2Q  : avx512_perm_3src<0x76, "vpermi2q", VR512, memopv8i64,
                                  i512mem, X86VPermiv3, v8i64, VK8WM>,
                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
defm VPERMI2PS : avx512_perm_3src<0x77, "vpermi2ps", VR512, memopv16f32,
                                  i512mem, X86VPermiv3, v16f32, VK16WM>,
                 EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMI2PD : avx512_perm_3src<0x77, "vpermi2pd", VR512, memopv8f64,
                                  i512mem, X86VPermiv3, v8f64, VK8WM>,
                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
multiclass avx512_perm_table_3src<bits<8> opc, string Suffix, RegisterClass RC,
                                  PatFrag mem_frag, X86MemOperand x86memop,
                                  SDNode OpNode, ValueType OpVT, RegisterClass KRC,
                                  ValueType MaskVT, RegisterClass MRC> :
  avx512_perm_3src<opc, "vpermt2"##Suffix, RC, mem_frag, x86memop, OpNode,
                   OpVT, KRC> {
  def : Pat<(OpVT (!cast<Intrinsic>("int_x86_avx512_mask_vpermt_"##Suffix##"_512")
                   VR512:$idx, VR512:$src1, VR512:$src2, -1)),
            (!cast<Instruction>(NAME#rr) VR512:$src1, VR512:$idx, VR512:$src2)>;

  def : Pat<(OpVT (!cast<Intrinsic>("int_x86_avx512_mask_vpermt_"##Suffix##"_512")
                   VR512:$idx, VR512:$src1, VR512:$src2, MRC:$mask)),
            (!cast<Instruction>(NAME#rrk) VR512:$src1,
             (MaskVT (COPY_TO_REGCLASS MRC:$mask, KRC)), VR512:$idx, VR512:$src2)>;
}
defm VPERMT2D  : avx512_perm_table_3src<0x7E, "d", VR512, memopv16i32, i512mem,
                                        X86VPermv3, v16i32, VK16WM, v16i1, GR16>,
                 EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMT2Q  : avx512_perm_table_3src<0x7E, "q", VR512, memopv8i64, i512mem,
                                        X86VPermv3, v8i64, VK8WM, v8i1, GR8>,
                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
defm VPERMT2PS : avx512_perm_table_3src<0x7F, "ps", VR512, memopv16f32, i512mem,
                                        X86VPermv3, v16f32, VK16WM, v16i1, GR16>,
                 EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMT2PD : avx512_perm_table_3src<0x7F, "pd", VR512, memopv8f64, i512mem,
                                        X86VPermv3, v8f64, VK8WM, v8i1, GR8>,
                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
//===----------------------------------------------------------------------===//
// AVX-512 - BLEND using mask
//===----------------------------------------------------------------------===//
multiclass avx512_blendmask<bits<8> opc, string OpcodeStr,
                            RegisterClass KRC, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag mem_frag,
                            SDNode OpNode, ValueType vt> {
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
             (ins KRC:$mask, RC:$src1, RC:$src2),
             !strconcat(OpcodeStr,
             " \t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
             [(set RC:$dst, (OpNode KRC:$mask, (vt RC:$src2),
                                    (vt RC:$src1)))]>, EVEX_4V, EVEX_K;

  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
             (ins KRC:$mask, RC:$src1, x86memop:$src2),
             !strconcat(OpcodeStr,
             " \t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
             []>, EVEX_4V, EVEX_K;
}
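
// Note the operand order in the rr pattern: with OpNode = vselect the result
// takes elements from $src2 where the mask bit is set and from $src1 where it
// is clear.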
let ExeDomain = SSEPackedSingle in
defm VBLENDMPSZ : avx512_blendmask<0x65, "vblendmps",
                                   VK16WM, VR512, f512mem,
                                   memopv16f32, vselect, v16f32>,
                  EVEX_CD8<32, CD8VF>, EVEX_V512;
let ExeDomain = SSEPackedDouble in
defm VBLENDMPDZ : avx512_blendmask<0x65, "vblendmpd",
                                   VK8WM, VR512, f512mem,
                                   memopv8f64, vselect, v8f64>,
                  VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;

def : Pat<(v16f32 (int_x86_avx512_mask_blend_ps_512 (v16f32 VR512:$src1),
                   (v16f32 VR512:$src2), (i16 GR16:$mask))),
          (VBLENDMPSZrr (COPY_TO_REGCLASS GR16:$mask, VK16WM),
                        VR512:$src1, VR512:$src2)>;

def : Pat<(v8f64 (int_x86_avx512_mask_blend_pd_512 (v8f64 VR512:$src1),
                  (v8f64 VR512:$src2), (i8 GR8:$mask))),
          (VBLENDMPDZrr (COPY_TO_REGCLASS GR8:$mask, VK8WM),
                        VR512:$src1, VR512:$src2)>;
defm VPBLENDMDZ : avx512_blendmask<0x64, "vpblendmd",
                                   VK16WM, VR512, f512mem,
                                   memopv16i32, vselect, v16i32>,
                  EVEX_CD8<32, CD8VF>, EVEX_V512;

defm VPBLENDMQZ : avx512_blendmask<0x64, "vpblendmq",
                                   VK8WM, VR512, f512mem,
                                   memopv8i64, vselect, v8i64>,
                  VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;

def : Pat<(v16i32 (int_x86_avx512_mask_blend_d_512 (v16i32 VR512:$src1),
                   (v16i32 VR512:$src2), (i16 GR16:$mask))),
          (VPBLENDMDZrr (COPY_TO_REGCLASS GR16:$mask, VK16),
                        VR512:$src1, VR512:$src2)>;

def : Pat<(v8i64 (int_x86_avx512_mask_blend_q_512 (v8i64 VR512:$src1),
                  (v8i64 VR512:$src2), (i8 GR8:$mask))),
          (VPBLENDMQZrr (COPY_TO_REGCLASS GR8:$mask, VK8),
                        VR512:$src1, VR512:$src2)>;
let Predicates = [HasAVX512] in {
def : Pat<(v8f32 (vselect (v8i1 VK8WM:$mask), (v8f32 VR256X:$src1),
                          (v8f32 VR256X:$src2))),
          (EXTRACT_SUBREG
            (v16f32 (VBLENDMPSZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
                      (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
                      (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;

def : Pat<(v8i32 (vselect (v8i1 VK8WM:$mask), (v8i32 VR256X:$src1),
                          (v8i32 VR256X:$src2))),
          (EXTRACT_SUBREG
            (v16i32 (VPBLENDMDZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
                      (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
                      (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
}
//===----------------------------------------------------------------------===//
// Compare Instructions
//===----------------------------------------------------------------------===//

// avx512_cmp_scalar - AVX512 CMPSS and CMPSD
multiclass avx512_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
                             Operand CC, SDNode OpNode, ValueType VT,
                             PatFrag ld_frag, string asm, string asm_alt> {
  def rr : AVX512Ii8<0xC2, MRMSrcReg,
                     (outs VK1:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
                     [(set VK1:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))],
                     IIC_SSE_ALU_F32S_RR>, EVEX_4V;
  def rm : AVX512Ii8<0xC2, MRMSrcMem,
                     (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
                     [(set VK1:$dst, (OpNode (VT RC:$src1),
                                             (ld_frag addr:$src2), imm:$cc))],
                     IIC_SSE_ALU_F32P_RM>, EVEX_4V;
  let isAsmParserOnly = 1, hasSideEffects = 0 in {
    def rri_alt : AVX512Ii8<0xC2, MRMSrcReg,
                            (outs VK1:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
                            asm_alt, [], IIC_SSE_ALU_F32S_RR>, EVEX_4V;
    def rmi_alt : AVX512Ii8<0xC2, MRMSrcMem,
                            (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
                            asm_alt, [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
  }
}

let Predicates = [HasAVX512] in {
defm VCMPSSZ : avx512_cmp_scalar<FR32X, f32mem, AVXCC, X86cmpms, f32, loadf32,
                 "vcmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                 "vcmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
                 XS;
defm VCMPSDZ : avx512_cmp_scalar<FR64X, f64mem, AVXCC, X86cmpms, f64, loadf64,
                 "vcmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                 "vcmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
                 XD, VEX_W;
}
multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                              X86VectorVTInfo _> {
  def rr : AVX512BI<opc, MRMSrcReg,
             (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2)))],
             IIC_SSE_ALU_F32P_RR>, EVEX_4V;

  def rm : AVX512BI<opc, MRMSrcMem,
             (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
                                       (_.VT (bitconvert (_.LdFrag addr:$src2)))))],
             IIC_SSE_ALU_F32P_RM>, EVEX_4V;
  def rrk : AVX512BI<opc, MRMSrcReg,
              (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
              !strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}}|",
                         "$dst {${mask}}, $src1, $src2}"),
              [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                     (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))))],
              IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;

  def rmk : AVX512BI<opc, MRMSrcMem,
              (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
              !strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}}|",
                         "$dst {${mask}}, $src1, $src2}"),
              [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                     (OpNode (_.VT _.RC:$src1),
                                             (_.VT (bitconvert
                                                     (_.LdFrag addr:$src2))))))],
              IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
}
multiclass avx512_icmp_packed_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                  X86VectorVTInfo _> :
           avx512_icmp_packed<opc, OpcodeStr, OpNode, _> {
  let mayLoad = 1 in {
    def rmb : AVX512BI<opc, MRMSrcMem,
                (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2),
                !strconcat(OpcodeStr, "\t{${src2}", _.BroadcastStr, ", $src1, $dst",
                           "|$dst, $src1, ${src2}", _.BroadcastStr, "}"),
                [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
                                          (X86VBroadcast (_.ScalarLdFrag addr:$src2))))],
                IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
    def rmbk : AVX512BI<opc, MRMSrcMem,
                 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
                                     _.ScalarMemOp:$src2),
                 !strconcat(OpcodeStr,
                            "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
                            "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
                 [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                        (OpNode (_.VT _.RC:$src1),
                                                (X86VBroadcast
                                                  (_.ScalarLdFrag addr:$src2)))))],
                 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
  }
}
multiclass avx512_icmp_packed_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
  let Predicates = [prd] in
  defm Z : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info512>,
           EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info256>,
                EVEX_V256;
    defm Z128 : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info128>,
                EVEX_V128;
  }
}

multiclass avx512_icmp_packed_rmb_vl<bits<8> opc, string OpcodeStr,
                                     SDNode OpNode, AVX512VLVectorVTInfo VTInfo,
                                     Predicate prd> {
  let Predicates = [prd] in
  defm Z : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info512>,
           EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info256>,
                EVEX_V256;
    defm Z128 : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info128>,
                EVEX_V128;
  }
}
defm VPCMPEQB : avx512_icmp_packed_vl<0x74, "vpcmpeqb", X86pcmpeqm,
                                      avx512vl_i8_info, HasBWI>,
                EVEX_CD8<8, CD8VF>;

defm VPCMPEQW : avx512_icmp_packed_vl<0x75, "vpcmpeqw", X86pcmpeqm,
                                      avx512vl_i16_info, HasBWI>,
                EVEX_CD8<16, CD8VF>;

defm VPCMPEQD : avx512_icmp_packed_rmb_vl<0x76, "vpcmpeqd", X86pcmpeqm,
                                          avx512vl_i32_info, HasAVX512>,
                EVEX_CD8<32, CD8VF>;

defm VPCMPEQQ : avx512_icmp_packed_rmb_vl<0x29, "vpcmpeqq", X86pcmpeqm,
                                          avx512vl_i64_info, HasAVX512>,
                T8PD, VEX_W, EVEX_CD8<64, CD8VF>;

defm VPCMPGTB : avx512_icmp_packed_vl<0x64, "vpcmpgtb", X86pcmpgtm,
                                      avx512vl_i8_info, HasBWI>,
                EVEX_CD8<8, CD8VF>;

defm VPCMPGTW : avx512_icmp_packed_vl<0x65, "vpcmpgtw", X86pcmpgtm,
                                      avx512vl_i16_info, HasBWI>,
                EVEX_CD8<16, CD8VF>;

defm VPCMPGTD : avx512_icmp_packed_rmb_vl<0x66, "vpcmpgtd", X86pcmpgtm,
                                          avx512vl_i32_info, HasAVX512>,
                EVEX_CD8<32, CD8VF>;

defm VPCMPGTQ : avx512_icmp_packed_rmb_vl<0x37, "vpcmpgtq", X86pcmpgtm,
                                          avx512vl_i64_info, HasAVX512>,
                T8PD, VEX_W, EVEX_CD8<64, CD8VF>;
def : Pat<(v8i1 (X86pcmpgtm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
          (COPY_TO_REGCLASS (VPCMPGTDZrr
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;

def : Pat<(v8i1 (X86pcmpeqm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
          (COPY_TO_REGCLASS (VPCMPEQDZrr
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
multiclass avx512_icmp_cc<bits<8> opc, string Suffix, SDNode OpNode,
                          X86VectorVTInfo _> {
  def rri : AVX512AIi8<opc, MRMSrcReg,
             (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, AVXCC:$cc),
             !strconcat("vpcmp${cc}", Suffix,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
                                       imm:$cc))],
             IIC_SSE_ALU_F32P_RR>, EVEX_4V;

  def rmi : AVX512AIi8<opc, MRMSrcMem,
             (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, AVXCC:$cc),
             !strconcat("vpcmp${cc}", Suffix,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
                                       (_.VT (bitconvert (_.LdFrag addr:$src2))),
                                       imm:$cc))],
             IIC_SSE_ALU_F32P_RM>, EVEX_4V;
  def rrik : AVX512AIi8<opc, MRMSrcReg,
              (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2,
                                  AVXCC:$cc),
              !strconcat("vpcmp${cc}", Suffix,
                         "\t{$src2, $src1, $dst {${mask}}|",
                         "$dst {${mask}}, $src1, $src2}"),
              [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                     (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
                                             imm:$cc)))],
              IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;

  def rmik : AVX512AIi8<opc, MRMSrcMem,
              (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
                                  AVXCC:$cc),
              !strconcat("vpcmp${cc}", Suffix,
                         "\t{$src2, $src1, $dst {${mask}}|",
                         "$dst {${mask}}, $src1, $src2}"),
              [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                     (OpNode (_.VT _.RC:$src1),
                                             (_.VT (bitconvert (_.LdFrag addr:$src2))),
                                             imm:$cc)))],
              IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1, hasSideEffects = 0 in {
    def rri_alt : AVX512AIi8<opc, MRMSrcReg,
                   (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, i8imm:$cc),
                   !strconcat("vpcmp", Suffix, "\t{$cc, $src2, $src1, $dst|",
                              "$dst, $src1, $src2, $cc}"),
                   [], IIC_SSE_ALU_F32P_RR>, EVEX_4V;
    def rmi_alt : AVX512AIi8<opc, MRMSrcMem,
                   (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, i8imm:$cc),
                   !strconcat("vpcmp", Suffix, "\t{$cc, $src2, $src1, $dst|",
                              "$dst, $src1, $src2, $cc}"),
                   [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
    def rrik_alt : AVX512AIi8<opc, MRMSrcReg,
                    (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2,
                                        i8imm:$cc),
                    !strconcat("vpcmp", Suffix,
                               "\t{$cc, $src2, $src1, $dst {${mask}}|",
                               "$dst {${mask}}, $src1, $src2, $cc}"),
                    [], IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
    def rmik_alt : AVX512AIi8<opc, MRMSrcMem,
                    (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
                                        i8imm:$cc),
                    !strconcat("vpcmp", Suffix,
                               "\t{$cc, $src2, $src1, $dst {${mask}}|",
                               "$dst {${mask}}, $src1, $src2, $cc}"),
                    [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
  }
}
multiclass avx512_icmp_cc_rmb<bits<8> opc, string Suffix, SDNode OpNode,
                              X86VectorVTInfo _> :
           avx512_icmp_cc<opc, Suffix, OpNode, _> {
  let mayLoad = 1 in {
    def rmib : AVX512AIi8<opc, MRMSrcMem,
                (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2,
                                    AVXCC:$cc),
                !strconcat("vpcmp${cc}", Suffix,
                           "\t{${src2}", _.BroadcastStr, ", $src1, $dst|",
                           "$dst, $src1, ${src2}", _.BroadcastStr, "}"),
                [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
                                          (X86VBroadcast (_.ScalarLdFrag addr:$src2)),
                                          imm:$cc))],
                IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
    def rmibk : AVX512AIi8<opc, MRMSrcMem,
                 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
                                     _.ScalarMemOp:$src2, AVXCC:$cc),
                 !strconcat("vpcmp${cc}", Suffix,
                            "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
                            "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
                 [(set _.KRC:$dst, (and _.KRCWM:$mask,
                                        (OpNode (_.VT _.RC:$src1),
                                                (X86VBroadcast (_.ScalarLdFrag addr:$src2)),
                                                imm:$cc)))],
                 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
  }
  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1, hasSideEffects = 0 in {
    def rmib_alt : AVX512AIi8<opc, MRMSrcMem,
                    (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2,
                                        i8imm:$cc),
                    !strconcat("vpcmp", Suffix,
                               "\t{$cc, ${src2}", _.BroadcastStr, ", $src1, $dst|",
                               "$dst, $src1, ${src2}", _.BroadcastStr, ", $cc}"),
                    [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
    def rmibk_alt : AVX512AIi8<opc, MRMSrcMem,
                     (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
                                         _.ScalarMemOp:$src2, i8imm:$cc),
                     !strconcat("vpcmp", Suffix,
                                "\t{$cc, ${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
                                "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, ", $cc}"),
                     [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
  }
}
multiclass avx512_icmp_cc_vl<bits<8> opc, string Suffix, SDNode OpNode,
                             AVX512VLVectorVTInfo VTInfo, Predicate prd> {
  let Predicates = [prd] in
  defm Z : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info512>, EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info256>, EVEX_V256;
    defm Z128 : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info128>, EVEX_V128;
  }
}

multiclass avx512_icmp_cc_rmb_vl<bits<8> opc, string Suffix, SDNode OpNode,
                                 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
  let Predicates = [prd] in
  defm Z : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info512>,
           EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info256>,
                EVEX_V256;
    defm Z128 : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info128>,
                EVEX_V128;
  }
}
defm VPCMPB  : avx512_icmp_cc_vl<0x3F, "b", X86cmpm, avx512vl_i8_info,
                                 HasBWI>, EVEX_CD8<8, CD8VF>;
defm VPCMPUB : avx512_icmp_cc_vl<0x3E, "ub", X86cmpmu, avx512vl_i8_info,
                                 HasBWI>, EVEX_CD8<8, CD8VF>;

defm VPCMPW  : avx512_icmp_cc_vl<0x3F, "w", X86cmpm, avx512vl_i16_info,
                                 HasBWI>, VEX_W, EVEX_CD8<16, CD8VF>;
defm VPCMPUW : avx512_icmp_cc_vl<0x3E, "uw", X86cmpmu, avx512vl_i16_info,
                                 HasBWI>, VEX_W, EVEX_CD8<16, CD8VF>;

defm VPCMPD  : avx512_icmp_cc_rmb_vl<0x1F, "d", X86cmpm, avx512vl_i32_info,
                                     HasAVX512>, EVEX_CD8<32, CD8VF>;
defm VPCMPUD : avx512_icmp_cc_rmb_vl<0x1E, "ud", X86cmpmu, avx512vl_i32_info,
                                     HasAVX512>, EVEX_CD8<32, CD8VF>;

defm VPCMPQ  : avx512_icmp_cc_rmb_vl<0x1F, "q", X86cmpm, avx512vl_i64_info,
                                     HasAVX512>, VEX_W, EVEX_CD8<64, CD8VF>;
defm VPCMPUQ : avx512_icmp_cc_rmb_vl<0x1E, "uq", X86cmpmu, avx512vl_i64_info,
                                     HasAVX512>, VEX_W, EVEX_CD8<64, CD8VF>;
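
// The assembler accepts both spellings generated above, e.g.:
//   vpcmpled %zmm1, %zmm0, %k1        ; condition folded into the mnemonic
//   vpcmpd $2, %zmm1, %zmm0, %k1      ; explicit immediate (2 = LE)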
// avx512_cmp_packed - compare packed instructions
multiclass avx512_cmp_packed<RegisterClass KRC, RegisterClass RC,
                             X86MemOperand x86memop, ValueType vt,
                             string suffix, Domain d> {
  def rri : AVX512PIi8<0xC2, MRMSrcReg,
              (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
              !strconcat("vcmp${cc}", suffix,
                         " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
              [(set KRC:$dst, (X86cmpm (vt RC:$src1), (vt RC:$src2), imm:$cc))], d>;
  def rrib : AVX512PIi8<0xC2, MRMSrcReg,
              (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
              !strconcat("vcmp${cc}", suffix,
                         " \t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
              [], d>, EVEX_B;
  def rmi : AVX512PIi8<0xC2, MRMSrcMem,
              (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, AVXCC:$cc),
              !strconcat("vcmp${cc}", suffix,
                         " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
              [(set KRC:$dst,
                (X86cmpm (vt RC:$src1), (memop addr:$src2), imm:$cc))], d>;

  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1, hasSideEffects = 0 in {
    def rri_alt : AVX512PIi8<0xC2, MRMSrcReg,
                   (outs KRC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
                   !strconcat("vcmp", suffix,
                              " \t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
    def rmi_alt : AVX512PIi8<0xC2, MRMSrcMem,
                   (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
                   !strconcat("vcmp", suffix,
                              " \t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
  }
}
1384 defm VCMPPSZ : avx512_cmp_packed<VK16, VR512, f512mem, v16f32,
1385 "ps", SSEPackedSingle>, PS, EVEX_4V, EVEX_V512,
1386 EVEX_CD8<32, CD8VF>;
1387 defm VCMPPDZ : avx512_cmp_packed<VK8, VR512, f512mem, v8f64,
1388 "pd", SSEPackedDouble>, PD, EVEX_4V, VEX_W, EVEX_V512,
1389 EVEX_CD8<64, CD8VF>;
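// Illustrative assembly accepted by the records above (a sketch, not more
// definitions): the packed FP compares write a mask register, e.g.
//
//   vcmpltps %zmm1, %zmm0, %k1          ; rri: k1[i] = zmm0[i] < zmm1[i]
//   vcmpps   $1, %zmm1, %zmm0, %k1      ; rri_alt: same predicate as imm8
//   vcmpltpd {sae}, %zmm1, %zmm0, %k2   ; rrib: suppress-all-exceptions form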
1391 def : Pat<(v8i1 (X86cmpm (v8f32 VR256X:$src1), (v8f32 VR256X:$src2), imm:$cc)),
1392 (COPY_TO_REGCLASS (VCMPPSZrri
1393 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
1394 (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
1396 def : Pat<(v8i1 (X86cmpm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
1397 (COPY_TO_REGCLASS (VPCMPDZrri
1398 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
1399 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
1401 def : Pat<(v8i1 (X86cmpmu (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
1402 (COPY_TO_REGCLASS (VPCMPUDZrri
1403 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
1404 (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
1407 def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
1408 (v16f32 VR512:$src2), imm:$cc, (i16 -1),
1410 (COPY_TO_REGCLASS (VCMPPSZrrib VR512:$src1, VR512:$src2,
1411 (I8Imm imm:$cc)), GR16)>;
1413 def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
1414 (v8f64 VR512:$src2), imm:$cc, (i8 -1),
1416 (COPY_TO_REGCLASS (VCMPPDZrrib VR512:$src1, VR512:$src2,
1417 (I8Imm imm:$cc)), GR8)>;
1419 def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
1420 (v16f32 VR512:$src2), imm:$cc, (i16 -1),
1422 (COPY_TO_REGCLASS (VCMPPSZrri VR512:$src1, VR512:$src2,
1423 (I8Imm imm:$cc)), GR16)>;
1425 def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
1426 (v8f64 VR512:$src2), imm:$cc, (i8 -1),
1428 (COPY_TO_REGCLASS (VCMPPDZrri VR512:$src1, VR512:$src2,
1429 (I8Imm imm:$cc)), GR8)>;
1431 // Mask register copy, including
1432 // - copy between mask registers
1433 // - load/store mask registers
1434 // - copy from GPR to mask register and vice versa
1436 multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
1437 string OpcodeStr, RegisterClass KRC,
1438 ValueType vvt, ValueType ivt, X86MemOperand x86memop> {
1439 let hasSideEffects = 0 in {
1440 def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
1441 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
1443 def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
1444 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
1445 [(set KRC:$dst, (vvt (bitconvert (ivt (load addr:$src)))))]>;
1447 def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
1448 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
1452 multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
1454 RegisterClass KRC, RegisterClass GRC> {
1455 let hasSideEffects = 0 in {
1456 def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src),
1457 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
1458 def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src),
1459 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
1463 let Predicates = [HasDQI] in
1464 defm KMOVB : avx512_mask_mov<0x90, 0x90, 0x91, "kmovb", VK8, v8i1, i8,
1466 avx512_mask_mov_gpr<0x92, 0x93, "kmovb", VK8, GR32>,
1469 let Predicates = [HasAVX512] in
1470 defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16,
1472 avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
1475 let Predicates = [HasBWI] in {
1476 defm KMOVD : avx512_mask_mov<0x90, 0x90, 0x91, "kmovd", VK32, v32i1, i32,
1477 i32mem>, VEX, PD, VEX_W;
1478 defm KMOVD : avx512_mask_mov_gpr<0x92, 0x93, "kmovd", VK32, GR32>,
1482 let Predicates = [HasBWI] in {
1483 defm KMOVQ : avx512_mask_mov<0x90, 0x90, 0x91, "kmovq", VK64, v64i1, i64,
1484 i64mem>, VEX, PS, VEX_W;
1485 defm KMOVQ : avx512_mask_mov_gpr<0x92, 0x93, "kmovq", VK64, GR64>,
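// Note on the GPR forms above: the 0x92/0x93 encodings move through a 32-bit
// GPR for the b/w/d widths (only kmovq uses a 64-bit GPR), which is why
// KMOVB and KMOVW pair their mask classes with GR32 rather than GR8/GR16.
// Illustrative assembly:
//
//   kmovw %k1, %eax      ; mask -> GPR (rk)
//   kmovw %eax, %k1      ; GPR -> mask (kr)
//   kmovw (%rdi), %k1    ; 16-bit mask load (km)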
1489 // GR from/to mask register
1490 let Predicates = [HasDQI] in {
1491 def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
1492 (KMOVBkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit))>;
1493 def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
1494 (EXTRACT_SUBREG (KMOVBrk VK8:$src), sub_8bit)>;
1496 let Predicates = [HasAVX512] in {
1497 def : Pat<(v16i1 (bitconvert (i16 GR16:$src))),
1498 (KMOVWkr (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit))>;
1499 def : Pat<(i16 (bitconvert (v16i1 VK16:$src))),
1500 (EXTRACT_SUBREG (KMOVWrk VK16:$src), sub_16bit)>;
1502 let Predicates = [HasBWI] in {
1503 def : Pat<(v32i1 (bitconvert (i32 GR32:$src))), (KMOVDkr GR32:$src)>;
1504 def : Pat<(i32 (bitconvert (v32i1 VK32:$src))), (KMOVDrk VK32:$src)>;
1506 let Predicates = [HasBWI] in {
1507 def : Pat<(v64i1 (bitconvert (i64 GR64:$src))), (KMOVQkr GR64:$src)>;
1508 def : Pat<(i64 (bitconvert (v64i1 VK64:$src))), (KMOVQrk VK64:$src)>;
1512 let Predicates = [HasDQI] in {
1513 def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst),
1514 (KMOVBmk addr:$dst, VK8:$src)>;
1516 let Predicates = [HasAVX512] in {
1517 def : Pat<(store (i16 (bitconvert (v16i1 VK16:$src))), addr:$dst),
1518 (KMOVWmk addr:$dst, VK16:$src)>;
1519 def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst),
1520 (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK8:$src, VK16))>;
1521 def : Pat<(i1 (load addr:$src)),
1522 (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK1)>;
1523 def : Pat<(v8i1 (bitconvert (i8 (load addr:$src)))),
1524 (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK8)>;
1526 let Predicates = [HasBWI] in {
1527 def : Pat<(store (i32 (bitconvert (v32i1 VK32:$src))), addr:$dst),
1528 (KMOVDmk addr:$dst, VK32:$src)>;
1530 let Predicates = [HasBWI] in {
1531 def : Pat<(store (i64 (bitconvert (v64i1 VK64:$src))), addr:$dst),
1532 (KMOVQmk addr:$dst, VK64:$src)>;
1535 let Predicates = [HasAVX512] in {
1536 def : Pat<(i1 (trunc (i64 GR64:$src))),
1537 (COPY_TO_REGCLASS (KMOVWkr (AND32ri (EXTRACT_SUBREG $src, sub_32bit),
1540 def : Pat<(i1 (trunc (i32 GR32:$src))),
1541 (COPY_TO_REGCLASS (KMOVWkr (AND32ri $src, (i32 1))), VK1)>;
1543 def : Pat<(i1 (trunc (i8 GR8:$src))),
1545 (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit), (i32 1))),
1547 def : Pat<(i1 (trunc (i16 GR16:$src))),
1549 (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), $src, sub_16bit), (i32 1))),
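// The trunc-to-i1 patterns above all follow one recipe: isolate bit 0 of the
// source in a 32-bit GPR with an AND, move the result into a mask register,
// and retype it as VK1. A sketch of the sequence produced for the i32 case:
//
//   andl  $1, %eax
//   kmovw %eax, %k0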
1552 def : Pat<(i32 (zext VK1:$src)),
1553 (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1))>;
1554 def : Pat<(i8 (zext VK1:$src)),
1557 (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)), sub_8bit)>;
1558 def : Pat<(i64 (zext VK1:$src)),
1559 (AND64ri8 (SUBREG_TO_REG (i64 0),
1560 (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), sub_32bit), (i64 1))>;
1561 def : Pat<(i16 (zext VK1:$src)),
1563 (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)),
1565 def : Pat<(v16i1 (scalar_to_vector VK1:$src)),
1566 (COPY_TO_REGCLASS VK1:$src, VK16)>;
1567 def : Pat<(v8i1 (scalar_to_vector VK1:$src)),
1568 (COPY_TO_REGCLASS VK1:$src, VK8)>;
1570 let Predicates = [HasBWI] in {
1571 def : Pat<(v32i1 (scalar_to_vector VK1:$src)),
1572 (COPY_TO_REGCLASS VK1:$src, VK32)>;
1573 def : Pat<(v64i1 (scalar_to_vector VK1:$src)),
1574 (COPY_TO_REGCLASS VK1:$src, VK64)>;
1578 // With plain AVX-512 (no DQI), the 8-bit mask is promoted to a 16-bit mask.
1579 let Predicates = [HasAVX512] in {
1580 // GR from/to 8-bit mask without native support
1581 def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
1583 (KMOVWkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
1585 def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
1587 (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
1590 def : Pat<(i1 (X86Vextract VK16:$src, (iPTR 0))),
1591 (COPY_TO_REGCLASS VK16:$src, VK1)>;
1592 def : Pat<(i1 (X86Vextract VK8:$src, (iPTR 0))),
1593 (COPY_TO_REGCLASS VK8:$src, VK1)>;
1595 let Predicates = [HasBWI] in {
1596 def : Pat<(i1 (X86Vextract VK32:$src, (iPTR 0))),
1597 (COPY_TO_REGCLASS VK32:$src, VK1)>;
1598 def : Pat<(i1 (X86Vextract VK64:$src, (iPTR 0))),
1599 (COPY_TO_REGCLASS VK64:$src, VK1)>;
1602 // Mask unary operation - KNOT
1604 multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr,
1605 RegisterClass KRC, SDPatternOperator OpNode,
1607 let Predicates = [prd] in
1608 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
1609 !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
1610 [(set KRC:$dst, (OpNode KRC:$src))]>;
1613 multiclass avx512_mask_unop_all<bits<8> opc, string OpcodeStr,
1614 SDPatternOperator OpNode> {
1615 defm B : avx512_mask_unop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
1617 defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
1618 HasAVX512>, VEX, PS;
1619 defm D : avx512_mask_unop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
1620 HasBWI>, VEX, PD, VEX_W;
1621 defm Q : avx512_mask_unop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
1622 HasBWI>, VEX, PS, VEX_W;
1625 defm KNOT : avx512_mask_unop_all<0x44, "knot", not>;
1627 multiclass avx512_mask_unop_int<string IntName, string InstName> {
1628 let Predicates = [HasAVX512] in
1629 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
1631 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
1632 (v16i1 (COPY_TO_REGCLASS GR16:$src, VK16))), GR16)>;
1634 defm : avx512_mask_unop_int<"knot", "KNOT">;
1636 let Predicates = [HasDQI] in
1637 def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)), (KNOTBrr VK8:$src1)>;
1638 let Predicates = [HasAVX512] in
1639 def : Pat<(xor VK16:$src1, (v16i1 immAllOnesV)), (KNOTWrr VK16:$src1)>;
1640 let Predicates = [HasBWI] in
1641 def : Pat<(xor VK32:$src1, (v32i1 immAllOnesV)), (KNOTDrr VK32:$src1)>;
1642 let Predicates = [HasBWI] in
1643 def : Pat<(xor VK64:$src1, (v64i1 immAllOnesV)), (KNOTQrr VK64:$src1)>;
1645 // KNL does not support KMOVB; the 8-bit mask is promoted to a 16-bit mask.
1646 let Predicates = [HasAVX512] in {
1647 def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)),
1648 (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$src1, VK16)), VK8)>;
1650 def : Pat<(not VK8:$src),
1652 (KNOTWrr (COPY_TO_REGCLASS VK8:$src, VK16)), VK8)>;
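// Sketch of what the two patterns above produce (illustrative): a v8i1 NOT
// on a target without DQI becomes a 16-bit KNOT, with the COPY_TO_REGCLASS
// nodes resolving to plain k-register copies (or nothing) after allocation:
//
//   knotw %k1, %k0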
1655 // Mask binary operation
1656 // - KAND, KANDN, KOR, KXNOR, KXOR
1657 multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr,
1658 RegisterClass KRC, SDPatternOperator OpNode,
1660 let Predicates = [prd] in
1661 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
1662 !strconcat(OpcodeStr,
1663 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1664 [(set KRC:$dst, (OpNode KRC:$src1, KRC:$src2))]>;
1667 multiclass avx512_mask_binop_all<bits<8> opc, string OpcodeStr,
1668 SDPatternOperator OpNode> {
1669 defm B : avx512_mask_binop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
1670 HasDQI>, VEX_4V, VEX_L, PD;
1671 defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
1672 HasAVX512>, VEX_4V, VEX_L, PS;
1673 defm D : avx512_mask_binop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
1674 HasBWI>, VEX_4V, VEX_L, VEX_W, PD;
1675 defm Q : avx512_mask_binop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
1676 HasBWI>, VEX_4V, VEX_L, VEX_W, PS;
1679 def andn : PatFrag<(ops node:$i0, node:$i1), (and (not node:$i0), node:$i1)>;
1680 def xnor : PatFrag<(ops node:$i0, node:$i1), (not (xor node:$i0, node:$i1))>;
1682 let isCommutable = 1 in {
1683 defm KAND : avx512_mask_binop_all<0x41, "kand", and>;
1684 defm KOR : avx512_mask_binop_all<0x45, "kor", or>;
1685 defm KXNOR : avx512_mask_binop_all<0x46, "kxnor", xnor>;
1686 defm KXOR : avx512_mask_binop_all<0x47, "kxor", xor>;
1688 let isCommutable = 0 in
1689 defm KANDN : avx512_mask_binop_all<0x42, "kandn", andn>;
1691 def : Pat<(xor VK1:$src1, VK1:$src2),
1692 (COPY_TO_REGCLASS (KXORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1693 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
1695 def : Pat<(or VK1:$src1, VK1:$src2),
1696 (COPY_TO_REGCLASS (KORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1697 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
1699 def : Pat<(and VK1:$src1, VK1:$src2),
1700 (COPY_TO_REGCLASS (KANDWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1701 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
1703 multiclass avx512_mask_binop_int<string IntName, string InstName> {
1704 let Predicates = [HasAVX512] in
1705 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
1706 (i16 GR16:$src1), (i16 GR16:$src2)),
1707 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
1708 (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
1709 (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
1712 defm : avx512_mask_binop_int<"kand", "KAND">;
1713 defm : avx512_mask_binop_int<"kandn", "KANDN">;
1714 defm : avx512_mask_binop_int<"kor", "KOR">;
1715 defm : avx512_mask_binop_int<"kxnor", "KXNOR">;
1716 defm : avx512_mask_binop_int<"kxor", "KXOR">;
1718 // With AVX-512, v8i1 mask operations are performed in 16-bit mask registers.
1719 multiclass avx512_binop_pat<SDPatternOperator OpNode, Instruction Inst> {
1720 let Predicates = [HasAVX512] in
1721 def : Pat<(OpNode VK8:$src1, VK8:$src2),
1723 (Inst (COPY_TO_REGCLASS VK8:$src1, VK16),
1724 (COPY_TO_REGCLASS VK8:$src2, VK16)), VK8)>;
1727 defm : avx512_binop_pat<and, KANDWrr>;
1728 defm : avx512_binop_pat<andn, KANDNWrr>;
1729 defm : avx512_binop_pat<or, KORWrr>;
1730 defm : avx512_binop_pat<xnor, KXNORWrr>;
1731 defm : avx512_binop_pat<xor, KXORWrr>;
1734 multiclass avx512_mask_unpck<bits<8> opc, string OpcodeStr,
1735 RegisterClass KRC> {
1736 let Predicates = [HasAVX512] in
1737 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
1738 !strconcat(OpcodeStr,
1739 " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1742 multiclass avx512_mask_unpck_bw<bits<8> opc, string OpcodeStr> {
1743 defm BW : avx512_mask_unpck<opc, !strconcat(OpcodeStr, "bw"), VK16>,
1747 defm KUNPCK : avx512_mask_unpck_bw<0x4b, "kunpck">;
1748 def : Pat<(v16i1 (concat_vectors (v8i1 VK8:$src1), (v8i1 VK8:$src2))),
1749 (KUNPCKBWrr (COPY_TO_REGCLASS VK8:$src2, VK16),
1750 (COPY_TO_REGCLASS VK8:$src1, VK16))>;
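// Operand-order note for the pattern above: concat_vectors places $src1 in
// the low lanes, while KUNPCKBW writes its second source to the low byte of
// the destination (DEST[7:0] = SRC2[7:0], DEST[15:8] = SRC1[7:0]), hence
// $src2 and $src1 are swapped when building the KUNPCKBWrr node.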
1753 multiclass avx512_mask_unpck_int<string IntName, string InstName> {
1754 let Predicates = [HasAVX512] in
1755 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_bw")
1756 (i16 GR16:$src1), (i16 GR16:$src2)),
1757 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"BWrr")
1758 (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
1759 (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
1761 defm : avx512_mask_unpck_int<"kunpck", "KUNPCK">;
1764 multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
1766 let Predicates = [HasAVX512], Defs = [EFLAGS] in
1767 def rr : I<opc, MRMSrcReg, (outs), (ins KRC:$src1, KRC:$src2),
1768 !strconcat(OpcodeStr, " \t{$src2, $src1|$src1, $src2}"),
1769 [(set EFLAGS, (OpNode KRC:$src1, KRC:$src2))]>;
1772 multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1773 defm W : avx512_mask_testop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
1777 defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>;
1779 def : Pat<(X86cmp VK1:$src1, (i1 0)),
1780 (KORTESTWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
1781 (COPY_TO_REGCLASS VK1:$src1, VK16))>;
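// KORTEST ORs its two mask operands and sets ZF when the result is all
// zeroes (and CF when it is all ones), so OR-ing a mask with itself, as the
// pattern above does, implements a compare-against-zero. Illustrative use:
//
//   kortestw %k1, %k1
//   je       .Lmask_is_zero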
1784 multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
1786 let Predicates = [HasAVX512] in
1787 def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, i8imm:$imm),
1788 !strconcat(OpcodeStr,
1789 " \t{$imm, $src, $dst|$dst, $src, $imm}"),
1790 [(set KRC:$dst, (OpNode KRC:$src, (i8 imm:$imm)))]>;
1793 multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
1795 defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
1799 defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", X86vshli>;
1800 defm KSHIFTR : avx512_mask_shiftop_w<0x30, 0x31, "kshiftr", X86vsrli>;
1802 // Mask setting all 0s or 1s
1803 multiclass avx512_mask_setop<RegisterClass KRC, ValueType VT, PatFrag Val> {
1804 let Predicates = [HasAVX512] in
1805 let isReMaterializable = 1, isAsCheapAsAMove = 1, isPseudo = 1 in
1806 def #NAME# : I<0, Pseudo, (outs KRC:$dst), (ins), "",
1807 [(set KRC:$dst, (VT Val))]>;
1810 multiclass avx512_mask_setop_w<PatFrag Val> {
1811 defm B : avx512_mask_setop<VK8, v8i1, Val>;
1812 defm W : avx512_mask_setop<VK16, v16i1, Val>;
1815 defm KSET0 : avx512_mask_setop_w<immAllZerosV>;
1816 defm KSET1 : avx512_mask_setop_w<immAllOnesV>;
1818 // With plain AVX-512 (no DQI), the 8-bit mask is promoted to a 16-bit mask.
1819 let Predicates = [HasAVX512] in {
1820 def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
1821 def : Pat<(v8i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK8)>;
1822 def : Pat<(i1 0), (COPY_TO_REGCLASS (KSET0W), VK1)>;
1823 def : Pat<(i1 1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
1824 def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
1826 def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
1827 (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;
1829 def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
1830 (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>;
1832 def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
1833 (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
1835 let Predicates = [HasVLX] in {
1836 def : Pat<(v8i1 (insert_subvector undef, (v4i1 VK4:$src), (iPTR 0))),
1837 (v8i1 (COPY_TO_REGCLASS VK4:$src, VK8))>;
1838 def : Pat<(v8i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))),
1839 (v8i1 (COPY_TO_REGCLASS VK2:$src, VK8))>;
1840 def : Pat<(v4i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
1841 (v4i1 (COPY_TO_REGCLASS VK8:$src, VK4))>;
1842 def : Pat<(v2i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
1843 (v2i1 (COPY_TO_REGCLASS VK8:$src, VK2))>;
1846 def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
1847 (v8i1 (COPY_TO_REGCLASS (KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;
1849 def : Pat<(v8i1 (X86vsrli VK8:$src, (i8 imm:$imm))),
1850 (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;
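// As with the other v8i1 operations, the two shift patterns above are
// performed in a 16-bit mask register, so a v8i1 shift expands to a single
// 16-bit instruction (sketch):
//
//   kshiftlw $2, %k1, %k0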
1851 //===----------------------------------------------------------------------===//
1852 // AVX-512 - Aligned and unaligned load and store
//===----------------------------------------------------------------------===//
1855 multiclass avx512_load<bits<8> opc, string OpcodeStr, PatFrag ld_frag,
1856 RegisterClass KRC, RegisterClass RC,
1857 ValueType vt, ValueType zvt, X86MemOperand memop,
1858 Domain d, bit IsReMaterializable = 1> {
1859 let hasSideEffects = 0 in {
1860 def rr : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
1861 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [],
1863 def rrkz : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
1864 !strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|",
1865 "${dst} {${mask}} {z}, $src}"), [], d>, EVEX, EVEX_KZ;
1867 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable,
1868 SchedRW = [WriteLoad] in
1869 def rm : AVX512PI<opc, MRMSrcMem, (outs RC:$dst), (ins memop:$src),
1870 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
1871 [(set RC:$dst, (vt (bitconvert (ld_frag addr:$src))))],
1874 let AddedComplexity = 20 in {
1875 let Constraints = "$src0 = $dst", hasSideEffects = 0 in {
1876 let hasSideEffects = 0 in
1877 def rrk : AVX512PI<opc, MRMSrcReg, (outs RC:$dst),
1878 (ins RC:$src0, KRC:$mask, RC:$src1),
1879 !strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
1880 "${dst} {${mask}}, $src1}"),
1881 [(set RC:$dst, (vt (vselect KRC:$mask,
1885 let mayLoad = 1, SchedRW = [WriteLoad] in
1886 def rmk : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
1887 (ins RC:$src0, KRC:$mask, memop:$src1),
1888 !strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
1889 "${dst} {${mask}}, $src1}"),
1892 (vt (bitconvert (ld_frag addr:$src1))),
1896 let mayLoad = 1, SchedRW = [WriteLoad] in
1897 def rmkz : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
1898 (ins KRC:$mask, memop:$src),
1899 !strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|",
1900 "${dst} {${mask}} {z}, $src}"),
1903 (vt (bitconvert (ld_frag addr:$src))),
1904 (vt (bitconvert (zvt immAllZerosV))))))],
1909 multiclass avx512_load_vl<bits<8> opc, string OpcodeStr, string ld_pat,
1910 string elty, string elsz, string vsz512,
1911 string vsz256, string vsz128, Domain d,
1912 Predicate prd, bit IsReMaterializable = 1> {
1913 let Predicates = [prd] in
1914 defm Z : avx512_load<opc, OpcodeStr,
1915 !cast<PatFrag>(ld_pat##"v"##vsz512##elty##elsz),
1916 !cast<RegisterClass>("VK"##vsz512##"WM"), VR512,
1917 !cast<ValueType>("v"##vsz512##elty##elsz), v16i32,
1918 !cast<X86MemOperand>(elty##"512mem"), d,
1919 IsReMaterializable>, EVEX_V512;
1921 let Predicates = [prd, HasVLX] in {
1922 defm Z256 : avx512_load<opc, OpcodeStr,
1923 !cast<PatFrag>(ld_pat##!if(!eq(elty,"f"),
1924 "v"##vsz256##elty##elsz, "v4i64")),
1925 !cast<RegisterClass>("VK"##vsz256##"WM"), VR256X,
1926 !cast<ValueType>("v"##vsz256##elty##elsz), v8i32,
1927 !cast<X86MemOperand>(elty##"256mem"), d,
1928 IsReMaterializable>, EVEX_V256;
1930 defm Z128 : avx512_load<opc, OpcodeStr,
1931 !cast<PatFrag>(ld_pat##!if(!eq(elty,"f"),
1932 "v"##vsz128##elty##elsz, "v2i64")),
1933 !cast<RegisterClass>("VK"##vsz128##"WM"), VR128X,
1934 !cast<ValueType>("v"##vsz128##elty##elsz), v4i32,
1935 !cast<X86MemOperand>(elty##"128mem"), d,
1936 IsReMaterializable>, EVEX_V128;
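// Worked example of the string concatenation above (illustrative): for the
// VMOVAPS instantiation further below, ld_pat = "alignedload", elty = "f",
// elsz = "32" and vsz512 = "16", so the Z variant resolves its !casts to
//
//   PatFrag        alignedloadv16f32
//   RegisterClass  VK16WM
//   ValueType      v16f32
//   X86MemOperand  f512mem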
1941 multiclass avx512_store<bits<8> opc, string OpcodeStr, PatFrag st_frag,
1942 ValueType OpVT, RegisterClass KRC, RegisterClass RC,
1943 X86MemOperand memop, Domain d> {
1944 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1945 def rr_alt : AVX512PI<opc, MRMDestReg, (outs RC:$dst), (ins RC:$src),
1946 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [], d>,
1948 let Constraints = "$src1 = $dst" in
1949 def rrk_alt : AVX512PI<opc, MRMDestReg, (outs RC:$dst),
1950 (ins RC:$src1, KRC:$mask, RC:$src2),
1951 !strconcat(OpcodeStr,
1952 "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"), [], d>,
1954 def rrkz_alt : AVX512PI<opc, MRMDestReg, (outs RC:$dst),
1955 (ins KRC:$mask, RC:$src),
1956 !strconcat(OpcodeStr,
1957 "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
1958 [], d>, EVEX, EVEX_KZ;
1960 let mayStore = 1 in {
1961 def mr : AVX512PI<opc, MRMDestMem, (outs), (ins memop:$dst, RC:$src),
1962 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
1963 [(st_frag (OpVT RC:$src), addr:$dst)], d>, EVEX;
1964 def mrk : AVX512PI<opc, MRMDestMem, (outs),
1965 (ins memop:$dst, KRC:$mask, RC:$src),
1966 !strconcat(OpcodeStr,
1967 "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
1968 [], d>, EVEX, EVEX_K;
1973 multiclass avx512_store_vl<bits<8> opc, string OpcodeStr, string st_pat,
1974 string st_suff_512, string st_suff_256,
1975 string st_suff_128, string elty, string elsz,
1976 string vsz512, string vsz256, string vsz128,
1977 Domain d, Predicate prd> {
1978 let Predicates = [prd] in
1979 defm Z : avx512_store<opc, OpcodeStr, !cast<PatFrag>(st_pat##st_suff_512),
1980 !cast<ValueType>("v"##vsz512##elty##elsz),
1981 !cast<RegisterClass>("VK"##vsz512##"WM"), VR512,
1982 !cast<X86MemOperand>(elty##"512mem"), d>, EVEX_V512;
1984 let Predicates = [prd, HasVLX] in {
1985 defm Z256 : avx512_store<opc, OpcodeStr, !cast<PatFrag>(st_pat##st_suff_256),
1986 !cast<ValueType>("v"##vsz256##elty##elsz),
1987 !cast<RegisterClass>("VK"##vsz256##"WM"), VR256X,
1988 !cast<X86MemOperand>(elty##"256mem"), d>, EVEX_V256;
1990 defm Z128 : avx512_store<opc, OpcodeStr, !cast<PatFrag>(st_pat##st_suff_128),
1991 !cast<ValueType>("v"##vsz128##elty##elsz),
1992 !cast<RegisterClass>("VK"##vsz128##"WM"), VR128X,
1993 !cast<X86MemOperand>(elty##"128mem"), d>, EVEX_V128;
1997 defm VMOVAPS : avx512_load_vl<0x28, "vmovaps", "alignedload", "f", "32",
1998 "16", "8", "4", SSEPackedSingle, HasAVX512>,
1999 avx512_store_vl<0x29, "vmovaps", "alignedstore",
2000 "512", "256", "", "f", "32", "16", "8", "4",
2001 SSEPackedSingle, HasAVX512>,
2002 PS, EVEX_CD8<32, CD8VF>;
2004 defm VMOVAPD : avx512_load_vl<0x28, "vmovapd", "alignedload", "f", "64",
2005 "8", "4", "2", SSEPackedDouble, HasAVX512>,
2006 avx512_store_vl<0x29, "vmovapd", "alignedstore",
2007 "512", "256", "", "f", "64", "8", "4", "2",
2008 SSEPackedDouble, HasAVX512>,
2009 PD, VEX_W, EVEX_CD8<64, CD8VF>;
2011 defm VMOVUPS : avx512_load_vl<0x10, "vmovups", "load", "f", "32",
2012 "16", "8", "4", SSEPackedSingle, HasAVX512>,
2013 avx512_store_vl<0x11, "vmovups", "store", "", "", "", "f", "32",
2014 "16", "8", "4", SSEPackedSingle, HasAVX512>,
2015 PS, EVEX_CD8<32, CD8VF>;
2017 defm VMOVUPD : avx512_load_vl<0x10, "vmovupd", "load", "f", "64",
2018 "8", "4", "2", SSEPackedDouble, HasAVX512, 0>,
2019 avx512_store_vl<0x11, "vmovupd", "store", "", "", "", "f", "64",
2020 "8", "4", "2", SSEPackedDouble, HasAVX512>,
2021 PD, VEX_W, EVEX_CD8<64, CD8VF>;
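// Illustrative assembly accepted by the load/store records above (a sketch,
// not more definitions), showing the merge- and zero-masked forms:
//
//   vmovups (%rdi), %zmm0 {%k1}        ; rmk: merge-masked load
//   vmovupd (%rdi), %zmm1 {%k2} {z}    ; rmkz: zero-masked load
//   vmovaps %zmm0, (%rsi)              ; mr: aligned store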
2023 def: Pat<(v8f64 (int_x86_avx512_mask_loadu_pd_512 addr:$ptr,
2024 (bc_v8f64 (v16i32 immAllZerosV)), GR8:$mask)),
2025 (VMOVUPDZrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
2027 def: Pat<(v16f32 (int_x86_avx512_mask_loadu_ps_512 addr:$ptr,
2028 (bc_v16f32 (v16i32 immAllZerosV)), GR16:$mask)),
2029 (VMOVUPSZrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
2031 def: Pat<(int_x86_avx512_mask_storeu_ps_512 addr:$ptr, (v16f32 VR512:$src),
2033 (VMOVUPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
2035 def: Pat<(int_x86_avx512_mask_storeu_pd_512 addr:$ptr, (v8f64 VR512:$src),
2037 (VMOVUPDZmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
2040 defm VMOVDQA32 : avx512_load_vl<0x6F, "vmovdqa32", "alignedload", "i", "32",
2041 "16", "8", "4", SSEPackedInt, HasAVX512>,
2042 avx512_store_vl<0x7F, "vmovdqa32", "alignedstore",
2043 "512", "256", "", "i", "32", "16", "8", "4",
2044 SSEPackedInt, HasAVX512>,
2045 PD, EVEX_CD8<32, CD8VF>;
2047 defm VMOVDQA64 : avx512_load_vl<0x6F, "vmovdqa64", "alignedload", "i", "64",
2048 "8", "4", "2", SSEPackedInt, HasAVX512>,
2049 avx512_store_vl<0x7F, "vmovdqa64", "alignedstore",
2050 "512", "256", "", "i", "64", "8", "4", "2",
2051 SSEPackedInt, HasAVX512>,
2052 PD, VEX_W, EVEX_CD8<64, CD8VF>;
2054 defm VMOVDQU8 : avx512_load_vl<0x6F, "vmovdqu8", "load", "i", "8",
2055 "64", "32", "16", SSEPackedInt, HasBWI>,
2056 avx512_store_vl<0x7F, "vmovdqu8", "store", "", "", "",
2057 "i", "8", "64", "32", "16", SSEPackedInt,
2058 HasBWI>, XD, EVEX_CD8<8, CD8VF>;
2060 defm VMOVDQU16 : avx512_load_vl<0x6F, "vmovdqu16", "load", "i", "16",
2061 "32", "16", "8", SSEPackedInt, HasBWI>,
2062 avx512_store_vl<0x7F, "vmovdqu16", "store", "", "", "",
2063 "i", "16", "32", "16", "8", SSEPackedInt,
2064 HasBWI>, XD, VEX_W, EVEX_CD8<16, CD8VF>;
2066 defm VMOVDQU32 : avx512_load_vl<0x6F, "vmovdqu32", "load", "i", "32",
2067 "16", "8", "4", SSEPackedInt, HasAVX512>,
2068 avx512_store_vl<0x7F, "vmovdqu32", "store", "", "", "",
2069 "i", "32", "16", "8", "4", SSEPackedInt,
2070 HasAVX512>, XS, EVEX_CD8<32, CD8VF>;
2072 defm VMOVDQU64 : avx512_load_vl<0x6F, "vmovdqu64", "load", "i", "64",
2073 "8", "4", "2", SSEPackedInt, HasAVX512>,
2074 avx512_store_vl<0x7F, "vmovdqu64", "store", "", "", "",
2075 "i", "64", "8", "4", "2", SSEPackedInt,
2076 HasAVX512>, XS, VEX_W, EVEX_CD8<64, CD8VF>;
2078 def: Pat<(v16i32 (int_x86_avx512_mask_loadu_d_512 addr:$ptr,
2079 (v16i32 immAllZerosV), GR16:$mask)),
2080 (VMOVDQU32Zrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
2082 def: Pat<(v8i64 (int_x86_avx512_mask_loadu_q_512 addr:$ptr,
2083 (bc_v8i64 (v16i32 immAllZerosV)), GR8:$mask)),
2084 (VMOVDQU64Zrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
2086 def: Pat<(int_x86_avx512_mask_storeu_d_512 addr:$ptr, (v16i32 VR512:$src),
2088 (VMOVDQU32Zmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
2090 def: Pat<(int_x86_avx512_mask_storeu_q_512 addr:$ptr, (v8i64 VR512:$src),
2092 (VMOVDQU64Zmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
2095 let AddedComplexity = 20 in {
2096 def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src),
2097 (bc_v8i64 (v16i32 immAllZerosV)))),
2098 (VMOVDQU64Zrrkz VK8WM:$mask, VR512:$src)>;
2100 def : Pat<(v8i64 (vselect VK8WM:$mask, (bc_v8i64 (v16i32 immAllZerosV)),
2101 (v8i64 VR512:$src))),
2102 (VMOVDQU64Zrrkz (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$mask, VK16)),
2105 def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 VR512:$src),
2106 (v16i32 immAllZerosV))),
2107 (VMOVDQU32Zrrkz VK16WM:$mask, VR512:$src)>;
2109 def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 immAllZerosV),
2110 (v16i32 VR512:$src))),
2111 (VMOVDQU32Zrrkz (KNOTWrr VK16WM:$mask), VR512:$src)>;
2114 // Move doubleword integer to packed doubleword integer
2116 def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
2117 "vmovd\t{$src, $dst|$dst, $src}",
2119 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
2121 def VMOVDI2PDIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src),
2122 "vmovd\t{$src, $dst|$dst, $src}",
2124 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
2125 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2126 def VMOV64toPQIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$src),
2127 "vmovq\t{$src, $dst|$dst, $src}",
2129 (v2i64 (scalar_to_vector GR64:$src)))],
2130 IIC_SSE_MOVDQ>, EVEX, VEX_W, VEX_LIG;
2131 let isCodeGenOnly = 1 in {
2132 def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
2133 "vmovq\t{$src, $dst|$dst, $src}",
2134 [(set FR64:$dst, (bitconvert GR64:$src))],
2135 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
2136 def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
2137 "vmovq\t{$src, $dst|$dst, $src}",
2138 [(set GR64:$dst, (bitconvert FR64:$src))],
2139 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
2141 def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
2142 "vmovq\t{$src, $dst|$dst, $src}",
2143 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
2144 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteStore]>,
2145 EVEX_CD8<64, CD8VT1>;
2147 // Move doubleword integer to scalar single-precision FP
2149 let isCodeGenOnly = 1 in {
2150 def VMOVDI2SSZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src),
2151 "vmovd\t{$src, $dst|$dst, $src}",
2152 [(set FR32X:$dst, (bitconvert GR32:$src))],
2153 IIC_SSE_MOVDQ>, EVEX, VEX_LIG;
2155 def VMOVDI2SSZrm : AVX512BI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src),
2156 "vmovd\t{$src, $dst|$dst, $src}",
2157 [(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))],
2158 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2161 // Move doubleword from xmm register to r/m32
2163 def VMOVPDI2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128X:$src),
2164 "vmovd\t{$src, $dst|$dst, $src}",
2165 [(set GR32:$dst, (vector_extract (v4i32 VR128X:$src),
2166 (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
2168 def VMOVPDI2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
2169 (ins i32mem:$dst, VR128X:$src),
2170 "vmovd\t{$src, $dst|$dst, $src}",
2171 [(store (i32 (vector_extract (v4i32 VR128X:$src),
2172 (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
2173 EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2175 // Move quadword from xmm1 register to r/m64
2177 def VMOVPQIto64Zrr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128X:$src),
2178 "vmovq\t{$src, $dst|$dst, $src}",
2179 [(set GR64:$dst, (extractelt (v2i64 VR128X:$src),
2181 IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_LIG, VEX_W,
2182 Requires<[HasAVX512, In64BitMode]>;
2184 def VMOVPQIto64Zmr : I<0xD6, MRMDestMem, (outs),
2185 (ins i64mem:$dst, VR128X:$src),
2186 "vmovq\t{$src, $dst|$dst, $src}",
2187 [(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
2188 addr:$dst)], IIC_SSE_MOVDQ>,
2189 EVEX, PD, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>,
2190 Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>;
2192 // Move scalar single-precision FP to doubleword integer
2194 let isCodeGenOnly = 1 in {
2195 def VMOVSS2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst),
2197 "vmovd\t{$src, $dst|$dst, $src}",
2198 [(set GR32:$dst, (bitconvert FR32X:$src))],
2199 IIC_SSE_MOVD_ToGP>, EVEX, VEX_LIG;
2200 def VMOVSS2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
2201 (ins i32mem:$dst, FR32X:$src),
2202 "vmovd\t{$src, $dst|$dst, $src}",
2203 [(store (i32 (bitconvert FR32X:$src)), addr:$dst)],
2204 IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
2207 // Move quadword integer to packed quadword integer
2209 def VMOVQI2PQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
2211 "vmovq\t{$src, $dst|$dst, $src}",
2213 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
2214 EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
2216 //===----------------------------------------------------------------------===//
2217 // AVX-512 MOVSS, MOVSD
2218 //===----------------------------------------------------------------------===//
2220 multiclass avx512_move_scalar <string asm, RegisterClass RC,
2221 SDNode OpNode, ValueType vt,
2222 X86MemOperand x86memop, PatFrag mem_pat> {
2223 let hasSideEffects = 0 in {
2224 def rr : SI<0x10, MRMSrcReg, (outs VR128X:$dst), (ins VR128X:$src1, RC:$src2),
2225 !strconcat(asm, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2226 [(set VR128X:$dst, (vt (OpNode VR128X:$src1,
2227 (scalar_to_vector RC:$src2))))],
2228 IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG;
2229 let Constraints = "$src1 = $dst" in
2230 def rrk : SI<0x10, MRMSrcReg, (outs VR128X:$dst),
2231 (ins VR128X:$src1, VK1WM:$mask, RC:$src2, RC:$src3),
2233 " \t{$src3, $src2, $dst {${mask}}|$dst {${mask}}, $src2, $src3}"),
2234 [], IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG, EVEX_K;
2235 def rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
2236 !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
2237 [(set RC:$dst, (mem_pat addr:$src))], IIC_SSE_MOV_S_RM>,
2239 let mayStore = 1 in {
2240 def mr: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
2241 !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
2242 [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
2244 def mrk: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, VK1WM:$mask, RC:$src),
2245 !strconcat(asm, " \t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
2246 [], IIC_SSE_MOV_S_MR>,
2247 EVEX, VEX_LIG, EVEX_K;
2249 } //hasSideEffects = 0
2252 let ExeDomain = SSEPackedSingle in
2253 defm VMOVSSZ : avx512_move_scalar<"movss", FR32X, X86Movss, v4f32, f32mem,
2254 loadf32>, XS, EVEX_CD8<32, CD8VT1>;
2256 let ExeDomain = SSEPackedDouble in
2257 defm VMOVSDZ : avx512_move_scalar<"movsd", FR64X, X86Movsd, v2f64, f64mem,
2258 loadf64>, XD, VEX_W, EVEX_CD8<64, CD8VT1>;
2260 def : Pat<(f32 (X86select VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
2261 (COPY_TO_REGCLASS (VMOVSSZrrk (COPY_TO_REGCLASS FR32X:$src2, VR128X),
2262 VK1WM:$mask, (f32 (IMPLICIT_DEF)), FR32X:$src1), FR32X)>;
2264 def : Pat<(f64 (X86select VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
2265 (COPY_TO_REGCLASS (VMOVSDZrrk (COPY_TO_REGCLASS FR64X:$src2, VR128X),
2266 VK1WM:$mask, (f64 (IMPLICIT_DEF)), FR64X:$src1), FR64X)>;
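// Design note on the two patterns above: a scalar f32/f64 select on an i1
// mask is lowered to the masked register-to-register VMOVSS/VMOVSD form.
// The masked move writes the selected scalar into the low element when the
// mask bit is set and keeps the tied destination element (holding the other
// operand) otherwise, which is exactly select semantics on the low lane.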
2268 def : Pat<(int_x86_avx512_mask_store_ss addr:$dst, VR128X:$src, GR8:$mask),
2269 (VMOVSSZmrk addr:$dst, (i1 (COPY_TO_REGCLASS GR8:$mask, VK1WM)),
2270 (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
2272 // For the disassembler
2273 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
2274 def VMOVSSZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
2275 (ins VR128X:$src1, FR32X:$src2),
2276 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
2278 XS, EVEX_4V, VEX_LIG;
2279 def VMOVSDZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
2280 (ins VR128X:$src1, FR64X:$src2),
2281 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
2283 XD, EVEX_4V, VEX_LIG, VEX_W;
2286 let Predicates = [HasAVX512] in {
2287 let AddedComplexity = 15 in {
2288 // Move scalar to XMM zero-extended: zero a VR128X register, then do a
2289 // MOVS{S,D} to the lower bits.
2290 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32X:$src)))),
2291 (VMOVSSZrr (v4f32 (V_SET0)), FR32X:$src)>;
2292 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128X:$src))),
2293 (VMOVSSZrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
2294 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128X:$src))),
2295 (VMOVSSZrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
2296 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64X:$src)))),
2297 (VMOVSDZrr (v2f64 (V_SET0)), FR64X:$src)>;
2299 // Move low f32 and clear high bits.
2300 def : Pat<(v8f32 (X86vzmovl (v8f32 VR256X:$src))),
2301 (SUBREG_TO_REG (i32 0),
2302 (VMOVSSZrr (v4f32 (V_SET0)),
2303 (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)), sub_xmm)>;
2304 def : Pat<(v8i32 (X86vzmovl (v8i32 VR256X:$src))),
2305 (SUBREG_TO_REG (i32 0),
2306 (VMOVSSZrr (v4i32 (V_SET0)),
2307 (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)), sub_xmm)>;
2310 let AddedComplexity = 20 in {
2311 // MOVSSrm zeros the high parts of the register; represent this
2312 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
2313 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
2314 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
2315 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
2316 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
2317 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
2318 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
2320 // MOVSDrm zeros the high parts of the register; represent this
2321 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
2322 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
2323 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
2324 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
2325 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
2326 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
2327 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
2328 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
2329 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
2330 def : Pat<(v2f64 (X86vzload addr:$src)),
2331 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
2333 // Represent the same patterns above but in the form they appear for 256-bit types.
2335 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
2336 (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
2337 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
2338 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
2339 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
2340 (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
2341 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
2342 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
2343 (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
2345 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
2346 (v4f32 (scalar_to_vector FR32X:$src)), (iPTR 0)))),
2347 (SUBREG_TO_REG (i32 0), (v4f32 (VMOVSSZrr (v4f32 (V_SET0)),
2348 FR32X:$src)), sub_xmm)>;
2349 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
2350 (v2f64 (scalar_to_vector FR64X:$src)), (iPTR 0)))),
2351 (SUBREG_TO_REG (i64 0), (v2f64 (VMOVSDZrr (v2f64 (V_SET0)),
2352 FR64X:$src)), sub_xmm)>;
2353 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
2354 (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
2355 (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;
2357 // Move low f64 and clear high bits.
2358 def : Pat<(v4f64 (X86vzmovl (v4f64 VR256X:$src))),
2359 (SUBREG_TO_REG (i32 0),
2360 (VMOVSDZrr (v2f64 (V_SET0)),
2361 (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)), sub_xmm)>;
2363 def : Pat<(v4i64 (X86vzmovl (v4i64 VR256X:$src))),
2364 (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (V_SET0)),
2365 (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)), sub_xmm)>;
2367 // Extract and store.
2368 def : Pat<(store (f32 (vector_extract (v4f32 VR128X:$src), (iPTR 0))),
2370 (VMOVSSZmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128X:$src), FR32X))>;
2371 def : Pat<(store (f64 (vector_extract (v2f64 VR128X:$src), (iPTR 0))),
2373 (VMOVSDZmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128X:$src), FR64X))>;
2375 // Shuffle with VMOVSS
2376 def : Pat<(v4i32 (X86Movss VR128X:$src1, VR128X:$src2)),
2377 (VMOVSSZrr (v4i32 VR128X:$src1),
2378 (COPY_TO_REGCLASS (v4i32 VR128X:$src2), FR32X))>;
2379 def : Pat<(v4f32 (X86Movss VR128X:$src1, VR128X:$src2)),
2380 (VMOVSSZrr (v4f32 VR128X:$src1),
2381 (COPY_TO_REGCLASS (v4f32 VR128X:$src2), FR32X))>;
2384 def : Pat<(v8i32 (X86Movss VR256X:$src1, VR256X:$src2)),
2385 (SUBREG_TO_REG (i32 0),
2386 (VMOVSSZrr (EXTRACT_SUBREG (v8i32 VR256X:$src1), sub_xmm),
2387 (EXTRACT_SUBREG (v8i32 VR256X:$src2), sub_xmm)),
2389 def : Pat<(v8f32 (X86Movss VR256X:$src1, VR256X:$src2)),
2390 (SUBREG_TO_REG (i32 0),
2391 (VMOVSSZrr (EXTRACT_SUBREG (v8f32 VR256X:$src1), sub_xmm),
2392 (EXTRACT_SUBREG (v8f32 VR256X:$src2), sub_xmm)),
2395 // Shuffle with VMOVSD
2396 def : Pat<(v2i64 (X86Movsd VR128X:$src1, VR128X:$src2)),
2397 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2398 def : Pat<(v2f64 (X86Movsd VR128X:$src1, VR128X:$src2)),
2399 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2400 def : Pat<(v4f32 (X86Movsd VR128X:$src1, VR128X:$src2)),
2401 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2402 def : Pat<(v4i32 (X86Movsd VR128X:$src1, VR128X:$src2)),
2403 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2406 def : Pat<(v4i64 (X86Movsd VR256X:$src1, VR256X:$src2)),
2407 (SUBREG_TO_REG (i32 0),
2408 (VMOVSDZrr (EXTRACT_SUBREG (v4i64 VR256X:$src1), sub_xmm),
2409 (EXTRACT_SUBREG (v4i64 VR256X:$src2), sub_xmm)),
2411 def : Pat<(v4f64 (X86Movsd VR256X:$src1, VR256X:$src2)),
2412 (SUBREG_TO_REG (i32 0),
2413 (VMOVSDZrr (EXTRACT_SUBREG (v4f64 VR256X:$src1), sub_xmm),
2414 (EXTRACT_SUBREG (v4f64 VR256X:$src2), sub_xmm)),
2417 def : Pat<(v2f64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
2418 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2419 def : Pat<(v2i64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
2420 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2421 def : Pat<(v4f32 (X86Movlps VR128X:$src1, VR128X:$src2)),
2422 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2423 def : Pat<(v4i32 (X86Movlps VR128X:$src1, VR128X:$src2)),
2424 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
2427 let AddedComplexity = 15 in
2428 def VMOVZPQILo2PQIZrr : AVX512XSI<0x7E, MRMSrcReg, (outs VR128X:$dst),
2430 "vmovq\t{$src, $dst|$dst, $src}",
2431 [(set VR128X:$dst, (v2i64 (X86vzmovl
2432 (v2i64 VR128X:$src))))],
2433 IIC_SSE_MOVQ_RR>, EVEX, VEX_W;
2435 let AddedComplexity = 20 in
2436 def VMOVZPQILo2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
2438 "vmovq\t{$src, $dst|$dst, $src}",
2439 [(set VR128X:$dst, (v2i64 (X86vzmovl
2440 (loadv2i64 addr:$src))))],
2441 IIC_SSE_MOVDQ>, EVEX, VEX_W,
2442 EVEX_CD8<8, CD8VT8>;
2444 let Predicates = [HasAVX512] in {
2445 // The AVX 128-bit movd/movq instructions write zeros in the high bits of the destination.
2446 let AddedComplexity = 20 in {
2447 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
2448 (VMOVDI2PDIZrm addr:$src)>;
2449 def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
2450 (VMOV64toPQIZrr GR64:$src)>;
2451 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
2452 (VMOVDI2PDIZrr GR32:$src)>;
2454 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
2455 (VMOVDI2PDIZrm addr:$src)>;
2456 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
2457 (VMOVDI2PDIZrm addr:$src)>;
2458 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
2459 (VMOVZPQILo2PQIZrm addr:$src)>;
2460 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128X:$src))),
2461 (VMOVZPQILo2PQIZrr VR128X:$src)>;
2462 def : Pat<(v2i64 (X86vzload addr:$src)),
2463 (VMOVZPQILo2PQIZrm addr:$src)>;
2466 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
2467 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
2468 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
2469 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>;
2470 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
2471 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
2472 (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
2475 def : Pat<(v16i32 (X86Vinsert (v16i32 immAllZerosV), GR32:$src2, (iPTR 0))),
2476 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;
2478 def : Pat<(v8i64 (X86Vinsert (bc_v8i64 (v16i32 immAllZerosV)), GR64:$src2, (iPTR 0))),
2479 (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
2481 def : Pat<(v16i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
2482 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;
2484 def : Pat<(v8i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
2485 (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
2487 //===----------------------------------------------------------------------===//
2488 // AVX-512 - Non-temporals
2489 //===----------------------------------------------------------------------===//
2490 let SchedRW = [WriteLoad] in {
2491 def VMOVNTDQAZrm : AVX512PI<0x2A, MRMSrcMem, (outs VR512:$dst),
2492 (ins i512mem:$src), "vmovntdqa\t{$src, $dst|$dst, $src}",
2493 [(set VR512:$dst, (int_x86_avx512_movntdqa addr:$src))],
2494 SSEPackedInt>, EVEX, T8PD, EVEX_V512,
2495 EVEX_CD8<64, CD8VF>;
2497 let Predicates = [HasAVX512, HasVLX] in {
2498 def VMOVNTDQAZ256rm : AVX512PI<0x2A, MRMSrcMem, (outs VR256X:$dst),
2500 "vmovntdqa\t{$src, $dst|$dst, $src}", [],
2501 SSEPackedInt>, EVEX, T8PD, EVEX_V256,
2502 EVEX_CD8<64, CD8VF>;
2504 def VMOVNTDQAZ128rm : AVX512PI<0x2A, MRMSrcMem, (outs VR128X:$dst),
2506 "vmovntdqa\t{$src, $dst|$dst, $src}", [],
2507 SSEPackedInt>, EVEX, T8PD, EVEX_V128,
2508 EVEX_CD8<64, CD8VF>;
2512 multiclass avx512_movnt<bits<8> opc, string OpcodeStr, PatFrag st_frag,
2513 ValueType OpVT, RegisterClass RC, X86MemOperand memop,
2514 Domain d, InstrItinClass itin = IIC_SSE_MOVNT> {
2515 let SchedRW = [WriteStore], mayStore = 1,
2516 AddedComplexity = 400 in
2517 def mr : AVX512PI<opc, MRMDestMem, (outs), (ins memop:$dst, RC:$src),
2518 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2519 [(st_frag (OpVT RC:$src), addr:$dst)], d, itin>, EVEX;
2522 multiclass avx512_movnt_vl<bits<8> opc, string OpcodeStr, PatFrag st_frag,
2523 string elty, string elsz, string vsz512,
2524 string vsz256, string vsz128, Domain d,
2525 Predicate prd, InstrItinClass itin = IIC_SSE_MOVNT> {
2526 let Predicates = [prd] in
2527 defm Z : avx512_movnt<opc, OpcodeStr, st_frag,
2528 !cast<ValueType>("v"##vsz512##elty##elsz), VR512,
2529 !cast<X86MemOperand>(elty##"512mem"), d, itin>,
2532 let Predicates = [prd, HasVLX] in {
2533 defm Z256 : avx512_movnt<opc, OpcodeStr, st_frag,
2534 !cast<ValueType>("v"##vsz256##elty##elsz), VR256X,
2535 !cast<X86MemOperand>(elty##"256mem"), d, itin>,
2538 defm Z128 : avx512_movnt<opc, OpcodeStr, st_frag,
2539 !cast<ValueType>("v"##vsz128##elty##elsz), VR128X,
2540 !cast<X86MemOperand>(elty##"128mem"), d, itin>,
2545 defm VMOVNTDQ : avx512_movnt_vl<0xE7, "vmovntdq", alignednontemporalstore,
2546 "i", "64", "8", "4", "2", SSEPackedInt,
2547 HasAVX512>, PD, EVEX_CD8<64, CD8VF>;
2549 defm VMOVNTPD : avx512_movnt_vl<0x2B, "vmovntpd", alignednontemporalstore,
2550 "f", "64", "8", "4", "2", SSEPackedDouble,
2551 HasAVX512>, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2553 defm VMOVNTPS : avx512_movnt_vl<0x2B, "vmovntps", alignednontemporalstore,
2554 "f", "32", "16", "8", "4", SSEPackedSingle,
2555 HasAVX512>, PS, EVEX_CD8<32, CD8VF>;
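// Illustrative use of the non-temporal stores defined above (streaming
// stores bypass the cache hierarchy; the memory operand must be aligned):
//
//   vmovntps %zmm0, (%rdi)
//   vmovntdq %zmm1, 64(%rdi)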
2557 //===----------------------------------------------------------------------===//
2558 // AVX-512 - Integer arithmetic
2560 multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2561 X86VectorVTInfo _, OpndItins itins,
2562 bit IsCommutable = 0> {
2563 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
2564 (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
2565 "$src2, $src1", "$src1, $src2",
2566 (_.VT (OpNode _.RC:$src1, _.RC:$src2)),
2567 itins.rr, IsCommutable>,
2568 AVX512BIBase, EVEX_4V;
2571 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
2572 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
2573 "$src2, $src1", "$src1, $src2",
2574 (_.VT (OpNode _.RC:$src1,
2575 (bitconvert (_.LdFrag addr:$src2)))),
2577 AVX512BIBase, EVEX_4V;
2580 multiclass avx512_binop_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
2581 X86VectorVTInfo _, OpndItins itins,
2582 bit IsCommutable = 0> :
2583 avx512_binop_rm<opc, OpcodeStr, OpNode, _, itins, IsCommutable> {
2585 defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
2586 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
2587 "${src2}"##_.BroadcastStr##", $src1",
2588 "$src1, ${src2}"##_.BroadcastStr,
2589 (_.VT (OpNode _.RC:$src1,
2591 (_.ScalarLdFrag addr:$src2)))),
2593 AVX512BIBase, EVEX_4V, EVEX_B;
2596 multiclass avx512_binop_rm_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
2597 AVX512VLVectorVTInfo VTInfo, OpndItins itins,
2598 Predicate prd, bit IsCommutable = 0> {
2599 let Predicates = [prd] in
2600 defm Z : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info512, itins,
2601 IsCommutable>, EVEX_V512;
2603 let Predicates = [prd, HasVLX] in {
2604 defm Z256 : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info256, itins,
2605 IsCommutable>, EVEX_V256;
2606 defm Z128 : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info128, itins,
2607 IsCommutable>, EVEX_V128;
2611 multiclass avx512_binop_rmb_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
2612 AVX512VLVectorVTInfo VTInfo, OpndItins itins,
2613 Predicate prd, bit IsCommutable = 0> {
2614 let Predicates = [prd] in
2615 defm Z : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info512, itins,
2616 IsCommutable>, EVEX_V512;
2618 let Predicates = [prd, HasVLX] in {
2619 defm Z256 : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info256, itins,
2620 IsCommutable>, EVEX_V256;
2621 defm Z128 : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info128, itins,
2622 IsCommutable>, EVEX_V128;
2626 multiclass avx512_binop_rm_vl_q<bits<8> opc, string OpcodeStr, SDNode OpNode,
2627 OpndItins itins, Predicate prd,
2628 bit IsCommutable = 0> {
2629 defm NAME : avx512_binop_rmb_vl<opc, OpcodeStr, OpNode, avx512vl_i64_info,
2630 itins, prd, IsCommutable>,
2631 VEX_W, EVEX_CD8<64, CD8VF>;
2634 multiclass avx512_binop_rm_vl_d<bits<8> opc, string OpcodeStr, SDNode OpNode,
2635 OpndItins itins, Predicate prd,
2636 bit IsCommutable = 0> {
2637 defm NAME : avx512_binop_rmb_vl<opc, OpcodeStr, OpNode, avx512vl_i32_info,
2638 itins, prd, IsCommutable>, EVEX_CD8<32, CD8VF>;
2641 multiclass avx512_binop_rm_vl_w<bits<8> opc, string OpcodeStr, SDNode OpNode,
2642 OpndItins itins, Predicate prd,
2643 bit IsCommutable = 0> {
2644 defm NAME : avx512_binop_rm_vl<opc, OpcodeStr, OpNode, avx512vl_i16_info,
2645 itins, prd, IsCommutable>, EVEX_CD8<16, CD8VF>;
2648 multiclass avx512_binop_rm_vl_b<bits<8> opc, string OpcodeStr, SDNode OpNode,
2649 OpndItins itins, Predicate prd,
2650 bit IsCommutable = 0> {
2651 defm NAME : avx512_binop_rm_vl<opc, OpcodeStr, OpNode, avx512vl_i8_info,
2652 itins, prd, IsCommutable>, EVEX_CD8<8, CD8VF>;
2655 multiclass avx512_binop_rm_vl_dq<bits<8> opc_d, bits<8> opc_q, string OpcodeStr,
2656 SDNode OpNode, OpndItins itins, Predicate prd,
2657 bit IsCommutable = 0> {
2658 defm Q : avx512_binop_rm_vl_q<opc_q, OpcodeStr, OpNode, itins, prd,
2661 defm D : avx512_binop_rm_vl_d<opc_d, OpcodeStr, OpNode, itins, prd,
2665 multiclass avx512_binop_rm_vl_bw<bits<8> opc_b, bits<8> opc_w, string OpcodeStr,
2666 SDNode OpNode, OpndItins itins, Predicate prd,
2667 bit IsCommutable = 0> {
2668 defm W : avx512_binop_rm_vl_w<opc_w, OpcodeStr, OpNode, itins, prd,
2671 defm B : avx512_binop_rm_vl_b<opc_b, OpcodeStr, OpNode, itins, prd,
2675 multiclass avx512_binop_rm_vl_all<bits<8> opc_b, bits<8> opc_w,
2676 bits<8> opc_d, bits<8> opc_q,
2677 string OpcodeStr, SDNode OpNode,
2678 OpndItins itins, bit IsCommutable = 0> {
2679 defm NAME : avx512_binop_rm_vl_dq<opc_d, opc_q, OpcodeStr, OpNode,
2680 itins, HasAVX512, IsCommutable>,
2681 avx512_binop_rm_vl_bw<opc_b, opc_w, OpcodeStr, OpNode,
2682 itins, HasBWI, IsCommutable>;
2685 multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, ValueType DstVT,
2686 ValueType SrcVT, RegisterClass KRC, RegisterClass RC,
2687 PatFrag memop_frag, X86MemOperand x86memop,
2688 PatFrag scalar_mfrag, X86MemOperand x86scalar_mop,
2689 string BrdcstStr, OpndItins itins, bit IsCommutable = 0> {
2690 let isCommutable = IsCommutable in
2692 def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2693 (ins RC:$src1, RC:$src2),
2694 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2696 def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2697 (ins KRC:$mask, RC:$src1, RC:$src2),
2698 !strconcat(OpcodeStr,
2699 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2700 [], itins.rr>, EVEX_4V, EVEX_K;
2701 def rrkz : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
2702 (ins KRC:$mask, RC:$src1, RC:$src2),
2703 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst {${mask}} {z}" ,
2704 "|$dst {${mask}} {z}, $src1, $src2}"),
2705 [], itins.rr>, EVEX_4V, EVEX_KZ;
2707 let mayLoad = 1 in {
2708 def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2709 (ins RC:$src1, x86memop:$src2),
2710 !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2712 def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2713 (ins KRC:$mask, RC:$src1, x86memop:$src2),
2714 !strconcat(OpcodeStr,
2715 " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
2716 [], itins.rm>, EVEX_4V, EVEX_K;
2717 def rmkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2718 (ins KRC:$mask, RC:$src1, x86memop:$src2),
2719 !strconcat(OpcodeStr,
2720 " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
2721 [], itins.rm>, EVEX_4V, EVEX_KZ;
2722 def rmb : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2723 (ins RC:$src1, x86scalar_mop:$src2),
2724 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2725 ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
2726 [], itins.rm>, EVEX_4V, EVEX_B;
2727 def rmbk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2728 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
2729 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2730 ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}",
2732 [], itins.rm>, EVEX_4V, EVEX_B, EVEX_K;
2733 def rmbkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
2734 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
2735 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
2736 ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
2738 [], itins.rm>, EVEX_4V, EVEX_B, EVEX_KZ;
2742 defm VPADD : avx512_binop_rm_vl_all<0xFC, 0xFD, 0xFE, 0xD4, "vpadd", add,
2743 SSE_INTALU_ITINS_P, 1>;
2744 defm VPSUB : avx512_binop_rm_vl_all<0xF8, 0xF9, 0xFA, 0xFB, "vpsub", sub,
2745 SSE_INTALU_ITINS_P, 0>;
2746 defm VPMULLD : avx512_binop_rm_vl_d<0x40, "vpmull", mul,
2747 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
2748 defm VPMULLW : avx512_binop_rm_vl_w<0xD5, "vpmull", mul,
2749 SSE_INTALU_ITINS_P, HasBWI, 1>;
2750 defm VPMULLQ : avx512_binop_rm_vl_q<0x40, "vpmull", mul,
2751 SSE_INTALU_ITINS_P, HasDQI, 1>, T8PD;
2753 defm VPMULDQZ : avx512_binop_rm2<0x28, "vpmuldq", v8i64, v16i32, VK8WM, VR512,
2754 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2755 SSE_INTALU_ITINS_P, 1>, T8PD, EVEX_V512,
2756 EVEX_CD8<64, CD8VF>, VEX_W;
2758 defm VPMULUDQZ : avx512_binop_rm2<0xF4, "vpmuludq", v8i64, v16i32, VK8WM, VR512,
2759 memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
2760 SSE_INTMUL_ITINS_P, 1>, EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
2762 def : Pat<(v8i64 (X86pmuludq (v16i32 VR512:$src1), (v16i32 VR512:$src2))),
2763 (VPMULUDQZrr VR512:$src1, VR512:$src2)>;
2765 def : Pat<(v8i64 (int_x86_avx512_mask_pmulu_dq_512 (v16i32 VR512:$src1),
2766 (v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2767 (VPMULUDQZrr VR512:$src1, VR512:$src2)>;
2768 def : Pat<(v8i64 (int_x86_avx512_mask_pmul_dq_512 (v16i32 VR512:$src1),
2769 (v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
2770 (VPMULDQZrr VR512:$src1, VR512:$src2)>;
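// VPMULDQ/VPMULUDQ are widening multiplies: they take the even-numbered
// 32-bit elements of each source (hence the v16i32 operands in the records
// above) and produce full 64-bit products (a v8i64 result), e.g.
//
//   vpmuludq %zmm1, %zmm0, %zmm2   ; zmm2[i] = zext(zmm0[2i]) * zext(zmm1[2i])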
2772 defm VPMAXSB : avx512_binop_rm_vl_b<0x3C, "vpmaxs", X86smax,
2773 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
2774 defm VPMAXSW : avx512_binop_rm_vl_w<0xEE, "vpmaxs", X86smax,
2775 SSE_INTALU_ITINS_P, HasBWI, 1>;
2776 defm VPMAXS : avx512_binop_rm_vl_dq<0x3D, 0x3D, "vpmaxs", X86smax,
2777 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
2779 defm VPMAXUB : avx512_binop_rm_vl_b<0xDE, "vpmaxu", X86umax,
2780 SSE_INTALU_ITINS_P, HasBWI, 1>;
2781 defm VPMAXUW : avx512_binop_rm_vl_w<0x3E, "vpmaxu", X86umax,
2782 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
2783 defm VPMAXU : avx512_binop_rm_vl_dq<0x3F, 0x3F, "vpmaxu", X86umax,
2784 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
2786 defm VPMINSB : avx512_binop_rm_vl_b<0x38, "vpmins", X86smin,
2787 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
2788 defm VPMINSW : avx512_binop_rm_vl_w<0xEA, "vpmins", X86smin,
2789 SSE_INTALU_ITINS_P, HasBWI, 1>;
2790 defm VPMINS : avx512_binop_rm_vl_dq<0x39, 0x39, "vpmins", X86smin,
2791 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
2793 defm VPMINUB : avx512_binop_rm_vl_b<0xDA, "vpminu", X86umin,
2794 SSE_INTALU_ITINS_P, HasBWI, 1>;
2795 defm VPMINUW : avx512_binop_rm_vl_w<0x3A, "vpminu", X86umin,
2796 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
2797 defm VPMINU : avx512_binop_rm_vl_dq<0x3B, 0x3B, "vpminu", X86umin,
2798 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
def : Pat <(v16i32 (int_x86_avx512_mask_pmaxs_d_512 (v16i32 VR512:$src1),
                    (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
           (VPMAXSDZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v16i32 (int_x86_avx512_mask_pmaxu_d_512 (v16i32 VR512:$src1),
                    (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
           (VPMAXUDZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v8i64 (int_x86_avx512_mask_pmaxs_q_512 (v8i64 VR512:$src1),
                   (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
           (VPMAXSQZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v8i64 (int_x86_avx512_mask_pmaxu_q_512 (v8i64 VR512:$src1),
                   (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
           (VPMAXUQZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v16i32 (int_x86_avx512_mask_pmins_d_512 (v16i32 VR512:$src1),
                    (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
           (VPMINSDZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v16i32 (int_x86_avx512_mask_pminu_d_512 (v16i32 VR512:$src1),
                    (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
           (VPMINUDZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v8i64 (int_x86_avx512_mask_pmins_q_512 (v8i64 VR512:$src1),
                   (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
           (VPMINSQZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v8i64 (int_x86_avx512_mask_pminu_q_512 (v8i64 VR512:$src1),
                   (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
           (VPMINUQZrr VR512:$src1, VR512:$src2)>;
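// As with the PMULDQ/PMULUDQ patterns above, an all-ones mask makes the
// zero pass-through operand dead, so these masked min/max intrinsics
// select the plain unmasked instructions.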
//===----------------------------------------------------------------------===//
// AVX-512 - Unpack Instructions
//===----------------------------------------------------------------------===//
multiclass avx512_unpack_fp<bits<8> opc, SDNode OpNode, ValueType vt,
                                   PatFrag mem_frag, RegisterClass RC,
                                   X86MemOperand x86memop, string asm,
                                   Domain d> {
    def rr : AVX512PI<opc, MRMSrcReg,
                (outs RC:$dst), (ins RC:$src1, RC:$src2),
                asm, [(set RC:$dst,
                           (vt (OpNode RC:$src1, RC:$src2)))],
                           d>, EVEX_4V;
    def rm : AVX512PI<opc, MRMSrcMem,
                (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
                asm, [(set RC:$dst,
                       (vt (OpNode RC:$src1,
                            (bitconvert (mem_frag addr:$src2)))))],
                            d>, EVEX_4V;
}
defm VUNPCKHPSZ: avx512_unpack_fp<0x15, X86Unpckh, v16f32, memopv8f64,
      VR512, f512mem, "vunpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VUNPCKHPDZ: avx512_unpack_fp<0x15, X86Unpckh, v8f64, memopv8f64,
      VR512, f512mem, "vunpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
defm VUNPCKLPSZ: avx512_unpack_fp<0x14, X86Unpckl, v16f32, memopv8f64,
      VR512, f512mem, "vunpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VUNPCKLPDZ: avx512_unpack_fp<0x14, X86Unpckl, v8f64, memopv8f64,
      VR512, f512mem, "vunpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
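// For reference, the 512-bit unpacks interleave within each 128-bit lane.
// Roughly, per lane, vunpcklps produces
//   dst = { src1[0], src2[0], src1[1], src2[1] }
// while vunpckhps takes elements 2 and 3 of each source instead.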
multiclass avx512_unpack_int<bits<8> opc, string OpcodeStr, SDNode OpNode,
                             ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                             X86MemOperand x86memop> {
  def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
             IIC_SSE_UNPCK>, EVEX_4V;
  def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop:$src2),
             !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1),
                                   (bitconvert (memop_frag addr:$src2)))))],
             IIC_SSE_UNPCK>, EVEX_4V;
}
defm VPUNPCKLDQZ  : avx512_unpack_int<0x62, "vpunpckldq", X86Unpckl, v16i32,
                                      VR512, memopv16i32, i512mem>, EVEX_V512,
                                      EVEX_CD8<32, CD8VF>;
defm VPUNPCKLQDQZ : avx512_unpack_int<0x6C, "vpunpcklqdq", X86Unpckl, v8i64,
                                      VR512, memopv8i64, i512mem>, EVEX_V512,
                                      VEX_W, EVEX_CD8<64, CD8VF>;
defm VPUNPCKHDQZ  : avx512_unpack_int<0x6A, "vpunpckhdq", X86Unpckh, v16i32,
                                      VR512, memopv16i32, i512mem>, EVEX_V512,
                                      EVEX_CD8<32, CD8VF>;
defm VPUNPCKHQDQZ : avx512_unpack_int<0x6D, "vpunpckhqdq", X86Unpckh, v8i64,
                                      VR512, memopv8i64, i512mem>, EVEX_V512,
                                      VEX_W, EVEX_CD8<64, CD8VF>;
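// The integer unpacks follow the same per-128-bit-lane interleave as the
// FP forms above; the QDQ variants set VEX_W to select quadword elements.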
//===----------------------------------------------------------------------===//
// AVX-512 - PSHUFD
//===----------------------------------------------------------------------===//
multiclass avx512_pshuf_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
                            SDNode OpNode, PatFrag mem_frag,
                            X86MemOperand x86memop, ValueType OpVT> {
  def ri : AVX512Ii8<opc, MRMSrcReg, (outs RC:$dst),
                     (ins RC:$src1, i8imm:$src2),
                     !strconcat(OpcodeStr,
                         " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set RC:$dst,
                       (OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
                     EVEX;
  def mi : AVX512Ii8<opc, MRMSrcMem, (outs RC:$dst),
                     (ins x86memop:$src1, i8imm:$src2),
                     !strconcat(OpcodeStr,
                         " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set RC:$dst,
                       (OpVT (OpNode (mem_frag addr:$src1),
                              (i8 imm:$src2))))]>, EVEX;
}
defm VPSHUFDZ : avx512_pshuf_imm<0x70, "vpshufd", VR512, X86PShufd, memopv16i32,
                      i512mem, v16i32>, PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
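// The immediate encodes a dword permutation applied independently to each
// 128-bit lane; e.g. (illustrative)
//   vpshufd $0x1B, %zmm0, %zmm1
// reverses the four dwords within every lane (0x1B = 0b00011011).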
//===----------------------------------------------------------------------===//
// AVX-512 Logical Instructions
//===----------------------------------------------------------------------===//

defm VPAND : avx512_binop_rm_vl_dq<0xDB, 0xDB, "vpand", and,
                                   SSE_INTALU_ITINS_P, HasAVX512, 1>;
defm VPOR : avx512_binop_rm_vl_dq<0xEB, 0xEB, "vpor", or,
                                  SSE_INTALU_ITINS_P, HasAVX512, 1>;
defm VPXOR : avx512_binop_rm_vl_dq<0xEF, 0xEF, "vpxor", xor,
                                   SSE_INTALU_ITINS_P, HasAVX512, 1>;
defm VPANDN : avx512_binop_rm_vl_dq<0xDF, 0xDF, "vpandn", X86andnp,
                                    SSE_INTALU_ITINS_P, HasAVX512, 1>;
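// X86andnp computes (NOT src1) AND src2, matching the PANDN hardware
// semantics; the other three map directly onto the generic and/or/xor
// SDNodes.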
//===----------------------------------------------------------------------===//
// AVX-512 FP arithmetic
//===----------------------------------------------------------------------===//
multiclass avx512_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          SizeItins itins> {
  defm SSZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"), OpNode, FR32X,
                             f32mem, itins.s, 0>, XS, EVEX_4V, VEX_LIG,
                             EVEX_CD8<32, CD8VT1>;
  defm SDZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"), OpNode, FR64X,
                             f64mem, itins.d, 0>, XD, VEX_W, EVEX_4V, VEX_LIG,
                             EVEX_CD8<64, CD8VT1>;
}
let isCommutable = 1 in {
defm VADD : avx512_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>;
defm VMUL : avx512_binop_s<0x59, "mul", fmul, SSE_ALU_ITINS_S>;
defm VMIN : avx512_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>;
defm VMAX : avx512_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>;
}
let isCommutable = 0 in {
defm VSUB : avx512_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>;
defm VDIV : avx512_binop_s<0x5E, "div", fdiv, SSE_ALU_ITINS_S>;
}
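// isCommutable lets later passes (e.g. two-address lowering) swap the two
// source operands, which is acceptable for add/mul/min/max but not for
// sub/div; hence the two let blocks above.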
multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                            RegisterClass KRC,
                            RegisterClass RC, ValueType vt,
                            X86MemOperand x86memop, PatFrag mem_frag,
                            X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
                            string BrdcstStr,
                            Domain d, OpndItins itins, bit commutable> {
  let isCommutable = commutable in {
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>,
       EVEX_4V;

    def rrk: PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src1, RC:$src2),
       !strconcat(OpcodeStr,
           " \t{$src2, $src1, $dst {${mask}} |$dst {${mask}}, $src1, $src2}"),
       [], itins.rr, d>, EVEX_4V, EVEX_K;

    def rrkz: PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src1, RC:$src2),
       !strconcat(OpcodeStr,
           " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
       [], itins.rr, d>, EVEX_4V, EVEX_KZ;
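    // In the {k} (rrk) form, lanes cleared in the mask keep the previous
    // contents of the destination register (merge masking); in the {k}{z}
    // (rrkz) form those lanes are zeroed instead (zero masking).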