//===-- X86InstrSSE.td - SSE Instruction Set ---------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//
class OpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm> {
  InstrItinClass rr = arg_rr;
  InstrItinClass rm = arg_rm;
  // InstrSchedModel info.
  X86FoldableSchedWrite Sched = WriteFAdd;
}

class SizeItins<OpndItins arg_s, OpndItins arg_d> {
  OpndItins s = arg_s;
  OpndItins d = arg_d;
}

class ShiftOpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm,
                     InstrItinClass arg_ri> {
  InstrItinClass rr = arg_rr;
  InstrItinClass rm = arg_rm;
  InstrItinClass ri = arg_ri;
}
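
// Usage sketch for the itinerary classes above (illustrative only; the
// EXAMPLE_* name is hypothetical and not defined in this file): an instruction
// definition consumes the rr/rm fields as its itineraries and the Sched field
// for the newer scheduling model, e.g.
//   def EXAMPLE_ITINS : OpndItins<IIC_ALU_NONMEM, IIC_ALU_MEM>;
// with the register form scheduled as Sched<[EXAMPLE_ITINS.Sched]> and the
// folded-load form as Sched<[EXAMPLE_ITINS.Sched.Folded]>.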
let Sched = WriteFAdd in {
def SSE_ALU_F32S : OpndItins<
  IIC_SSE_ALU_F32S_RR, IIC_SSE_ALU_F32S_RM
>;

def SSE_ALU_F64S : OpndItins<
  IIC_SSE_ALU_F64S_RR, IIC_SSE_ALU_F64S_RM
>;
}

def SSE_ALU_ITINS_S : SizeItins<
  SSE_ALU_F32S, SSE_ALU_F64S
>;

let Sched = WriteFMul in {
def SSE_MUL_F32S : OpndItins<
  IIC_SSE_MUL_F32S_RR, IIC_SSE_MUL_F32S_RM
>;

def SSE_MUL_F64S : OpndItins<
  IIC_SSE_MUL_F64S_RR, IIC_SSE_MUL_F64S_RM
>;
}

def SSE_MUL_ITINS_S : SizeItins<
  SSE_MUL_F32S, SSE_MUL_F64S
>;

let Sched = WriteFDiv in {
def SSE_DIV_F32S : OpndItins<
  IIC_SSE_DIV_F32S_RR, IIC_SSE_DIV_F32S_RM
>;

def SSE_DIV_F64S : OpndItins<
  IIC_SSE_DIV_F64S_RR, IIC_SSE_DIV_F64S_RM
>;
}

def SSE_DIV_ITINS_S : SizeItins<
  SSE_DIV_F32S, SSE_DIV_F64S
>;
let Sched = WriteFAdd in {
def SSE_ALU_F32P : OpndItins<
  IIC_SSE_ALU_F32P_RR, IIC_SSE_ALU_F32P_RM
>;

def SSE_ALU_F64P : OpndItins<
  IIC_SSE_ALU_F64P_RR, IIC_SSE_ALU_F64P_RM
>;
}

def SSE_ALU_ITINS_P : SizeItins<
  SSE_ALU_F32P, SSE_ALU_F64P
>;

let Sched = WriteFMul in {
def SSE_MUL_F32P : OpndItins<
  IIC_SSE_MUL_F32P_RR, IIC_SSE_MUL_F32P_RM
>;

def SSE_MUL_F64P : OpndItins<
  IIC_SSE_MUL_F64P_RR, IIC_SSE_MUL_F64P_RM
>;
}

def SSE_MUL_ITINS_P : SizeItins<
  SSE_MUL_F32P, SSE_MUL_F64P
>;

let Sched = WriteFDiv in {
def SSE_DIV_F32P : OpndItins<
  IIC_SSE_DIV_F32P_RR, IIC_SSE_DIV_F32P_RM
>;

def SSE_DIV_F64P : OpndItins<
  IIC_SSE_DIV_F64P_RR, IIC_SSE_DIV_F64P_RM
>;
}

def SSE_DIV_ITINS_P : SizeItins<
  SSE_DIV_F32P, SSE_DIV_F64P
>;
let Sched = WriteVecLogic in
def SSE_VEC_BIT_ITINS_P : OpndItins<
  IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
>;

def SSE_BIT_ITINS_P : OpndItins<
  IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
>;

let Sched = WriteVecALU in {
def SSE_INTALU_ITINS_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

def SSE_INTALUQ_ITINS_P : OpndItins<
  IIC_SSE_INTALUQ_P_RR, IIC_SSE_INTALUQ_P_RM
>;
}

let Sched = WriteVecIMul in
def SSE_INTMUL_ITINS_P : OpndItins<
  IIC_SSE_INTMUL_P_RR, IIC_SSE_INTMUL_P_RM
>;

def SSE_INTSHIFT_ITINS_P : ShiftOpndItins<
  IIC_SSE_INTSH_P_RR, IIC_SSE_INTSH_P_RM, IIC_SSE_INTSH_P_RI
>;

def SSE_MOVA_ITINS : OpndItins<
  IIC_SSE_MOVA_P_RR, IIC_SSE_MOVA_P_RM
>;

def SSE_MOVU_ITINS : OpndItins<
  IIC_SSE_MOVU_P_RR, IIC_SSE_MOVU_P_RM
>;

def SSE_DPPD_ITINS : OpndItins<
  IIC_SSE_DPPD_RR, IIC_SSE_DPPD_RM
>;

def SSE_DPPS_ITINS : OpndItins<
  IIC_SSE_DPPS_RR, IIC_SSE_DPPS_RM
>;

def DEFAULT_ITINS : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

def SSE_EXTRACT_ITINS : OpndItins<
  IIC_SSE_EXTRACTPS_RR, IIC_SSE_EXTRACTPS_RM
>;

def SSE_INSERT_ITINS : OpndItins<
  IIC_SSE_INSERTPS_RR, IIC_SSE_INSERTPS_RM
>;

let Sched = WriteMPSAD in
def SSE_MPSADBW_ITINS : OpndItins<
  IIC_SSE_MPSADBW_RR, IIC_SSE_MPSADBW_RM
>;

let Sched = WriteVecIMul in
def SSE_PMULLD_ITINS : OpndItins<
  IIC_SSE_PMULLD_RR, IIC_SSE_PMULLD_RM
>;
// Definitions for backward compatibility.
// The instructions mapped onto these definitions use a different itinerary
// than the actual scheduling model.
let Sched = WriteShuffle in
def DEFAULT_ITINS_SHUFFLESCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteVecIMul in
def DEFAULT_ITINS_VECIMULSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteShuffle in
def SSE_INTALU_ITINS_SHUFF_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

let Sched = WriteMPSAD in
def DEFAULT_ITINS_MPSADSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteFBlend in
def DEFAULT_ITINS_FBLENDSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteBlend in
def DEFAULT_ITINS_BLENDSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteVarBlend in
def DEFAULT_ITINS_VARBLENDSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteFBlend in
def SSE_INTALU_ITINS_FBLEND_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

let Sched = WriteBlend in
def SSE_INTALU_ITINS_BLEND_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;
//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instruction Classes
//===----------------------------------------------------------------------===//
/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           Domain d, OpndItins itins, bit Is2Addr = 1> {
  let isCommutable = 1 in {
    def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                !if(Is2Addr,
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                [(set RC:$dst, (OpNode RC:$src1, RC:$src2))], itins.rr, d>,
                Sched<[itins.Sched]>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))], itins.rm, d>,
              Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
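
// Hypothetical instantiation of sse12_fp_scalar, shown only as a sketch (the
// real defms in this file go through wrapper multiclasses):
//   defm ADDSS_EX : sse12_fp_scalar<0x58, "addss", fadd, FR32, f32mem,
//                                   SSEPackedSingle, SSE_ALU_F32S>, XS;
// would produce ADDSS_EXrr and ADDSS_EXrm using the scalar-single ALU
// itineraries defined above.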
/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat,
                               Domain d, OpndItins itins, bit Is2Addr = 1> {
  let isCodeGenOnly = 1 in {
    def rr_Int : SI_Int<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                 !if(Is2Addr,
                     !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set RC:$dst, (!cast<Intrinsic>(
                     !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
                     RC:$src1, RC:$src2))], itins.rr, d>,
                 Sched<[itins.Sched]>;
    def rm_Int : SI_Int<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
                 !if(Is2Addr,
                     !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
                     SSEVer, "_", OpcodeStr, FPSizeStr))
                     RC:$src1, mem_cpat:$src2))], itins.rm, d>,
                 Sched<[itins.Sched.Folded, ReadAfterLd]>;
  }
}
/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, OpndItins itins, bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                !if(Is2Addr,
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>,
                Sched<[itins.Sched]>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
              itins.rm, d>,
              Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                      string OpcodeStr, X86MemOperand x86memop,
                                      list<dag> pat_rr, list<dag> pat_rm,
                                      bit Is2Addr = 1> {
  let isCommutable = 1, hasSideEffects = 0 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                !if(Is2Addr,
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                pat_rr, NoItinerary, d>,
                Sched<[WriteVecLogic]>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              pat_rm, NoItinerary, d>,
              Sched<[WriteVecLogicLd, ReadAfterLd]>;
}
//===----------------------------------------------------------------------===//
// Non-instruction patterns
//===----------------------------------------------------------------------===//
// A vector extract of the first f32/f64 position is a subregister copy.
def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
          (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32)>;
def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
          (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64)>;
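
// Illustrative sketch of what these patterns match (hypothetical IR, not part
// of this file): LLVM IR such as
//   %lane0 = extractelement <4 x float> %v, i32 0
// selects to a plain VR128->FR32 register-class copy, so no instruction is
// emitted for the extract itself.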
// A 128-bit subvector extract from the first 256-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (iPTR 0))),
          (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (iPTR 0))),
          (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;

def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (iPTR 0))),
          (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (iPTR 0))),
          (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;

def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (iPTR 0))),
          (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (iPTR 0))),
          (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;
// A 128-bit subvector insert to the first 256-bit vector position
// is a subregister copy that needs no instruction.
let AddedComplexity = 25 in { // to give priority over vinsertf128rm
def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
}
// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (COPY_TO_REGCLASS FR32:$src, VR128)>;
def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
          (COPY_TO_REGCLASS FR32:$src, VR128)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (COPY_TO_REGCLASS FR64:$src, VR128)>;
def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
          (COPY_TO_REGCLASS FR64:$src, VR128)>;
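
// Sketch of the corresponding IR shape (hypothetical example, not part of
// this file): a scalar_to_vector node typically comes from IR like
//   %v = insertelement <4 x float> undef, float %f, i32 0
// and, with these patterns, costs only an FR32->VR128 register-class copy.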
// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasSSE2] in {
  def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}
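
// For example (hypothetical IR, shown only to illustrate the patterns above):
//   %b = bitcast <4 x i32> %a to <2 x i64>
// selects to no instruction at all; both values occupy the same VR128
// register, so the cast is free.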
// Bitcasts between 256-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasAVX] in {
  def : Pat<(v4f64  (bitconvert (v8f32 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v8i32 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v4i64 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v32i8 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v8i32 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v4i64 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v4f64 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v32i8 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v8f32 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v8i32 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v4f64 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v32i8 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v4f64 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v4i64 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v8f32 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v8i32 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v32i8 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v8f32 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v4i64 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v4f64 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))),  (v16i16 VR256:$src)>;
}
// Alias instructions that map fld0 to xorps for SSE or vxorps for AVX.
// This is expanded by ExpandPostRAPseudos.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, SchedRW = [WriteZero] in {
  def FsFLD0SS : I<0, Pseudo, (outs FR32:$dst), (ins), "",
                   [(set FR32:$dst, fp32imm0)]>, Requires<[HasSSE1]>;
  def FsFLD0SD : I<0, Pseudo, (outs FR64:$dst), (ins), "",
                   [(set FR64:$dst, fpimm0)]>, Requires<[HasSSE2]>;
}
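
// A minimal sketch of that expansion (assuming the pseudo's result landed in
// %xmm0; the actual register is chosen by the allocator):
//   FsFLD0SS -> xorps %xmm0, %xmm0   (vxorps %xmm0, %xmm0, %xmm0 under AVX)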
//===----------------------------------------------------------------------===//
// AVX & SSE - Zero/One Vectors
//===----------------------------------------------------------------------===//
// Alias instruction that maps a zero vector to pxor / xorp* for SSE.
// This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
// swizzled by ExecutionDepsFix to pxor.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, SchedRW = [WriteZero] in {
def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
               [(set VR128:$dst, (v4f32 immAllZerosV))]>;
}

def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
// The same as done above but for AVX. The 256-bit AVX1 ISA doesn't support PI,
// and doesn't need it because on Sandy Bridge the register is set to zero
// at the rename stage without using any execution unit, so SET0PSY
// and SET0PDY can be used for vector int instructions without penalty.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, Predicates = [HasAVX], SchedRW = [WriteZero] in {
def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                 [(set VR256:$dst, (v8f32 immAllZerosV))]>;
}

let Predicates = [HasAVX] in
  def : Pat<(v4f64 immAllZerosV), (AVX_SET0)>;

let Predicates = [HasAVX2] in {
  def : Pat<(v4i64 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v8i32 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v16i16 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v32i8 immAllZerosV), (AVX_SET0)>;
}
// AVX1 has no support for 256-bit integer instructions, but since the 128-bit
// VPXOR instruction writes zero to its upper part, it's safe to use it to
// build zeros.
let Predicates = [HasAVX1Only] in {
def : Pat<(v32i8 immAllZerosV), (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v32i8 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;

def : Pat<(v16i16 immAllZerosV), (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v16i16 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;

def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;

def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
}
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-ones value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, SchedRW = [WriteZero] in {
  def V_SETALLONES : I<0, Pseudo, (outs VR128:$dst), (ins), "",
                       [(set VR128:$dst, (v4i32 immAllOnesV))]>;
  let Predicates = [HasAVX2] in
  def AVX2_SETALLONES : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                          [(set VR256:$dst, (v8i32 immAllOnesV))]>;
}
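
// Sketch of the post-RA expansion for these pseudos (assuming %xmm0 / %ymm0
// were assigned; the compare-with-self idiom sets every bit):
//   V_SETALLONES    -> pcmpeqd  %xmm0, %xmm0
//   AVX2_SETALLONES -> vpcmpeqd %ymm0, %ymm0, %ymm0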
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move FP Scalar Instructions
//
// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; register-to-register
// movss/movsd is not modeled as an INSERT_SUBREG because INSERT_SUBREG requires
// that the insert be implementable in terms of a copy, and, as just mentioned,
// we don't use movss/movsd for copies.
//===----------------------------------------------------------------------===//
multiclass sse12_move_rr<RegisterClass RC, SDNode OpNode, ValueType vt,
                         X86MemOperand x86memop, string base_opc,
                         string asm_opr, Domain d = GenericDomain> {
  def rr : SI<0x10, MRMSrcReg, (outs VR128:$dst),
              (ins VR128:$src1, RC:$src2),
              !strconcat(base_opc, asm_opr),
              [(set VR128:$dst, (vt (OpNode VR128:$src1,
                                            (scalar_to_vector RC:$src2))))],
              IIC_SSE_MOV_S_RR, d>, Sched<[WriteFShuffle]>;

  // For the disassembler
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
  def rr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                  (ins VR128:$src1, RC:$src2),
                  !strconcat(base_opc, asm_opr),
                  [], IIC_SSE_MOV_S_RR>, Sched<[WriteFShuffle]>;
}
multiclass sse12_move<RegisterClass RC, SDNode OpNode, ValueType vt,
                      X86MemOperand x86memop, string OpcodeStr,
                      Domain d = GenericDomain> {
  defm V#NAME : sse12_move_rr<RC, OpNode, vt, x86memop, OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}", d>,
                              VEX_4V, VEX_LIG;

  def V#NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR, d>,
                     VEX, VEX_LIG, Sched<[WriteStore]>;

  let Constraints = "$src1 = $dst" in {
    defm NAME : sse12_move_rr<RC, OpNode, vt, x86memop, OpcodeStr,
                              "\t{$src2, $dst|$dst, $src2}", d>;
  }

  def NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR, d>,
                   Sched<[WriteStore]>;
}
// Loading from memory automatically zeroes the upper bits.
multiclass sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
                         PatFrag mem_pat, string OpcodeStr,
                         Domain d = GenericDomain> {
  def V#NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set RC:$dst, (mem_pat addr:$src))],
                     IIC_SSE_MOV_S_RM, d>, VEX, VEX_LIG, Sched<[WriteLoad]>;
  def NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set RC:$dst, (mem_pat addr:$src))],
                   IIC_SSE_MOV_S_RM, d>, Sched<[WriteLoad]>;
}
defm MOVSS : sse12_move<FR32, X86Movss, v4f32, f32mem, "movss",
                        SSEPackedSingle>, XS;
defm MOVSD : sse12_move<FR64, X86Movsd, v2f64, f64mem, "movsd",
                        SSEPackedDouble>, XD;

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  defm MOVSS : sse12_move_rm<FR32, f32mem, loadf32, "movss",
                             SSEPackedSingle>, XS;

  let AddedComplexity = 20 in
    defm MOVSD : sse12_move_rm<FR64, f64mem, loadf64, "movsd",
                               SSEPackedDouble>, XD;
}
let Predicates = [UseAVX] in {
  let AddedComplexity = 20 in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;

  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;

  // Represent the same patterns above but in the form they appear for
  // 256-bit types.
  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
  }
  // Extract and store.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSSmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32))>;
  def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSDmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64))>;
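
  // Illustrative sketch (hypothetical IR, not part of this file): a store of
  // lane 0, e.g.
  //   %f = extractelement <4 x float> %v, i32 0
  //   store float %f, float* %p
  // becomes a single vmovss store from the xmm register, with no separate
  // extract instruction.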
  // Shuffle with VMOVSS
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4i32 VR128:$src1),
                      (COPY_TO_REGCLASS (v4i32 VR128:$src2), FR32))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4f32 VR128:$src1),
                      (COPY_TO_REGCLASS (v4f32 VR128:$src2), FR32))>;

  def : Pat<(v8i32 (X86Movss VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSrr (EXTRACT_SUBREG (v8i32 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8i32 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v8f32 (X86Movss VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSrr (EXTRACT_SUBREG (v8f32 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8f32 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  // Shuffle with VMOVSD
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;

  def : Pat<(v4i64 (X86Movsd VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDrr (EXTRACT_SUBREG (v4i64 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4i64 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v4f64 (X86Movsd VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDrr (EXTRACT_SUBREG (v4f64 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4f64 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  // FIXME: Instead of an X86Movlps there should be an X86Movsd here; the
  // problem is during lowering, where it's not possible to recognize the fold
  // because it has two uses through a bitcast. One use disappears at isel time
  // and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}
let Predicates = [UseSSE1] in {
  let Predicates = [NoSSE41], AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSS to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
            (MOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (MOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (MOVSSrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  }

  let AddedComplexity = 20 in {
  // MOVSSrm already zeros the high parts of the register.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  }

  // Extract and store.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSSmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR32))>;

  // Shuffle with MOVSS
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
}
let Predicates = [UseSSE2] in {
  let Predicates = [NoSSE41], AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSD to the lower bits.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (MOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
  }

  let AddedComplexity = 20 in {
  // MOVSDrm already zeros the high parts of the register.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  }

  // Extract and store.
  def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSDmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR64))>;

  // Shuffle with MOVSD
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;

  // FIXME: Instead of an X86Movlps there should be an X86Movsd here; the
  // problem is during lowering, where it's not possible to recognize the fold
  // because it has two uses through a bitcast. One use disappears at isel time
  // and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
//===----------------------------------------------------------------------===//
multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            OpndItins itins,
                            bit IsReMaterializable = 1> {
let hasSideEffects = 0 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], itins.rr, d>,
              Sched<[WriteFShuffle]>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], itins.rm, d>,
              Sched<[WriteLoad]>;
}
let Predicates = [HasAVX, NoVLX] in {
defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                                "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                                PS, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                                "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                                PD, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                                "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                                PS, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                                "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                                PD, VEX;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
                                 "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                                 PS, VEX, VEX_L;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
                                 "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                                 PD, VEX, VEX_L;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
                                 "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                                 PS, VEX, VEX_L;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                                 "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                                 PD, VEX, VEX_L;
}

let Predicates = [UseSSE1] in {
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                               "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                               PS;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                               "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                               PS;
}
let Predicates = [UseSSE2] in {
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                               "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                               PD;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                               "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                               PD;
}
let SchedRW = [WriteStore], Predicates = [HasAVX, NoVLX] in {
def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v4f32 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVA_P_MR>, VEX;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v2f64 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVA_P_MR>, VEX;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movups\t{$src, $dst|$dst, $src}",
                     [(store (v4f32 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVU_P_MR>, VEX;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movupd\t{$src, $dst|$dst, $src}",
                     [(store (v2f64 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVU_P_MR>, VEX;
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movaps\t{$src, $dst|$dst, $src}",
                      [(alignedstore256 (v8f32 VR256:$src), addr:$dst)],
                      IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movapd\t{$src, $dst|$dst, $src}",
                      [(alignedstore256 (v4f64 VR256:$src), addr:$dst)],
                      IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movups\t{$src, $dst|$dst, $src}",
                      [(store (v8f32 VR256:$src), addr:$dst)],
                      IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movupd\t{$src, $dst|$dst, $src}",
                      [(store (v4f64 VR256:$src), addr:$dst)],
                      IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
}
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
    SchedRW = [WriteFShuffle] in {
  def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movaps\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVAPDrr_REV : VPDI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movapd\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVUPSrr_REV : VPSI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movups\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVU_P_RR>, VEX;
  def VMOVUPDrr_REV : VPDI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movupd\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVU_P_RR>, VEX;
  def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movaps\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
  def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movapd\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
  def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movups\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
  def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movupd\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
}
let Predicates = [HasAVX] in {
def : Pat<(v8i32 (X86vzmovl
                  (insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl
                  (insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
def : Pat<(v8f32 (X86vzmovl
                  (insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
def : Pat<(v4f64 (X86vzmovl
                  (insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;

def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
          (VMOVUPSYmr addr:$dst, VR256:$src)>;
def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
          (VMOVUPDYmr addr:$dst, VR256:$src)>;
}
let SchedRW = [WriteStore] in {
def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>;
}
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
    SchedRW = [WriteFShuffle] in {
  def MOVAPSrr_REV : PSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movaps\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVA_P_RR>;
  def MOVAPDrr_REV : PDI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movapd\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVA_P_RR>;
  def MOVUPSrr_REV : PSI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movups\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVU_P_RR>;
  def MOVUPDrr_REV : PDI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movupd\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVU_P_RR>;
}
let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
            (VMOVUPDmr addr:$dst, VR128:$src)>;
}

let Predicates = [UseSSE1] in
  def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
            (MOVUPSmr addr:$dst, VR128:$src)>;
let Predicates = [UseSSE2] in
  def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
            (MOVUPDmr addr:$dst, VR128:$src)>;
// Use vmovaps/vmovups for AVX integer load/store.
let Predicates = [HasAVX, NoVLX] in {
  // 128-bit load/store
  def : Pat<(alignedloadv2i64 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (VMOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;

  // 256-bit load/store
  def : Pat<(alignedloadv4i64 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv4i64 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedstore256 (v4i64 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v8i32 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v16i16 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v32i8 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v4i64 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v8i32 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v16i16 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v32i8 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  // Special patterns for storing subvector extracts of the lower 128 bits.
  // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr.
  def : Pat<(alignedstore (v2f64 (extract_subvector
                                  (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v4f32 (extract_subvector
                                  (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v2i64 (extract_subvector
                                  (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v4i32 (extract_subvector
                                  (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v8i16 (extract_subvector
                                  (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v16i8 (extract_subvector
                                  (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;

  def : Pat<(store (v2f64 (extract_subvector
                           (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v4f32 (extract_subvector
                           (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v2i64 (extract_subvector
                           (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v4i32 (extract_subvector
                           (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v8i16 (extract_subvector
                           (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v16i8 (extract_subvector
                           (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
}
// Use movaps / movups for SSE integer load / store (one byte shorter).
// The instructions selected below are then converted to MOVDQA/MOVDQU
// during the SSE domain pass.
let Predicates = [UseSSE1] in {
  def : Pat<(alignedloadv2i64 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (MOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
}
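
// Where the one byte comes from (an illustrative encoding sketch): the
// aligned store "movaps %xmm0, (%rax)" encodes as 0F 29 00 (3 bytes), while
// the equivalent "movdqa %xmm0, (%rax)" is 66 0F 7F 00 (4 bytes); the extra
// 0x66 operand-size prefix on the MOVDQ* forms is the byte being saved.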
// Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
// bits are disregarded. FIXME: Set encoding to pseudo!
let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
let isCodeGenOnly = 1 in {
  def FsVMOVAPSrm : VPSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                         "movaps\t{$src, $dst|$dst, $src}",
                         [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
                         IIC_SSE_MOVA_P_RM>, VEX;
  def FsVMOVAPDrm : VPDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                         "movapd\t{$src, $dst|$dst, $src}",
                         [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
                         IIC_SSE_MOVA_P_RM>, VEX;
  def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                       "movaps\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
                       IIC_SSE_MOVA_P_RM>;
  def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                       "movapd\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
                       IIC_SSE_MOVA_P_RM>;
}
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low packed FP Instructions
//===----------------------------------------------------------------------===//
multiclass sse12_mov_hilo_packed_base<bits<8>opc, SDNode psnode, SDNode pdnode,
                                      string base_opc, string asm_opr,
                                      InstrItinClass itin> {
  def PSrm : PI<opc, MRMSrcMem,
                (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                !strconcat(base_opc, "s", asm_opr),
                [(set VR128:$dst,
                  (psnode VR128:$src1,
                    (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
                itin, SSEPackedSingle>, PS,
                Sched<[WriteFShuffleLd, ReadAfterLd]>;

  def PDrm : PI<opc, MRMSrcMem,
                (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                !strconcat(base_opc, "d", asm_opr),
                [(set VR128:$dst, (v2f64 (pdnode VR128:$src1,
                                   (scalar_to_vector (loadf64 addr:$src2)))))],
                itin, SSEPackedDouble>, PD,
                Sched<[WriteFShuffleLd, ReadAfterLd]>;
}
multiclass sse12_mov_hilo_packed<bits<8>opc, SDNode psnode, SDNode pdnode,
                                 string base_opc, InstrItinClass itin> {
  let Predicates = [UseAVX] in
    defm V#NAME : sse12_mov_hilo_packed_base<opc, psnode, pdnode, base_opc,
                                 "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                                 itin>, VEX_4V;

  let Constraints = "$src1 = $dst" in
    defm NAME : sse12_mov_hilo_packed_base<opc, psnode, pdnode, base_opc,
                                 "\t{$src2, $dst|$dst, $src2}",
                                 itin>;
}

let AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, X86Movlps, X86Movlpd, "movlp",
                                    IIC_SSE_MOV_LH>;
}
let SchedRW = [WriteStore] in {
let Predicates = [UseAVX] in {
def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movlps\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                   (iPTR 0))), addr:$dst)],
                     IIC_SSE_MOV_LH>, VEX;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movlpd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract (v2f64 VR128:$src),
                                   (iPTR 0))), addr:$dst)],
                     IIC_SSE_MOV_LH>, VEX;
}
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)],
                   IIC_SSE_MOV_LH>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)],
                   IIC_SSE_MOV_LH>;
}
let Predicates = [UseAVX] in {
  // Shuffle with VMOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;

  // Shuffle with VMOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1,
                    (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;

  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                   (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
}
let Predicates = [UseSSE1] in {
  // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
  def : Pat<(store (i64 (vector_extract (bc_v2i64 (v4f32 VR128:$src2)),
                         (iPTR 0))), addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;

  // Shuffle with MOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlps VR128:$src1,
                  (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;

  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                   (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
}
let Predicates = [UseSSE2] in {
  // Shuffle with MOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1,
                    (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;

  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Hi packed FP Instructions
//===----------------------------------------------------------------------===//
let AddedComplexity = 20 in {
  defm MOVH : sse12_mov_hilo_packed<0x16, X86Movlhps, X86Movlhpd, "movhp",
                                    IIC_SSE_MOV_LH>;
}
let SchedRW = [WriteStore] in {
// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0, so the non-store version isn't too horrible.
let Predicates = [UseAVX] in {
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movhps\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract
                                   (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                              (bc_v2f64 (v4f32 VR128:$src))),
                                   (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movhpd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract
                                   (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
                                   (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
}
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                            (bc_v2f64 (v4f32 VR128:$src))),
                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
}
let Predicates = [UseAVX] in {
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;

  // FIXME: Instead of X86Unpckl, there should be an X86Movlhpd here; the
  // problem is during lowering, where it's not possible to recognize the load
  // fold because it has two uses through a bitcast. One use disappears at isel
  // time and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
            (VMOVHPDrm VR128:$src1, addr:$src2)>;
  // Also handle an i64 load because that may get selected as a faster way to
  // load the data.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                    (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
            (VMOVHPDrm VR128:$src1, addr:$src2)>;

  def : Pat<(store (f64 (vector_extract
                         (v2f64 (X86VPermilpi VR128:$src, (i8 1))),
                         (iPTR 0))), addr:$dst),
            (VMOVHPDmr addr:$dst, VR128:$src)>;
}
let Predicates = [UseSSE1] in {
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
}
let Predicates = [UseSSE2] in {
  // FIXME: Instead of X86Unpckl, there should be an X86Movlhpd here; the
  // problem is during lowering, where it's not possible to recognize the load
  // fold because it has two uses through a bitcast. One use disappears at isel
  // time and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
            (MOVHPDrm VR128:$src1, addr:$src2)>;
  // Also handle an i64 load because that may get selected as a faster way to
  // load the data.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                    (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
            (MOVHPDrm VR128:$src1, addr:$src2)>;

  def : Pat<(store (f64 (vector_extract
                         (v2f64 (X86Shufp VR128:$src, VR128:$src, (i8 1))),
                         (iPTR 0))), addr:$dst),
            (MOVHPDmr addr:$dst, VR128:$src)>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low to High and High to Low packed FP Instructions
//===----------------------------------------------------------------------===//
let AddedComplexity = 20, Predicates = [UseAVX] in {
  def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
                        (ins VR128:$src1, VR128:$src2),
                        "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
                        IIC_SSE_MOV_LH>,
                        VEX_4V, Sched<[WriteFShuffle]>;
  def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
                        (ins VR128:$src1, VR128:$src2),
                        "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
                        IIC_SSE_MOV_LH>,
                        VEX_4V, Sched<[WriteFShuffle]>;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
                      IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
  def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
                      IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
}

let Predicates = [UseAVX] in {
  def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
            (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
            (VMOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;

  def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
}

let Predicates = [UseSSE1] in {
  def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
            (MOVLHPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
            (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;

  def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (MOVHLPSrr VR128:$src1, VR128:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Conversion Instructions
//===----------------------------------------------------------------------===//

def SSE_CVT_PD : OpndItins<
  IIC_SSE_CVT_PD_RR, IIC_SSE_CVT_PD_RM
>;

let Sched = WriteCvtI2F in
def SSE_CVT_PS : OpndItins<
  IIC_SSE_CVT_PS_RR, IIC_SSE_CVT_PS_RM
>;

let Sched = WriteCvtI2F in
def SSE_CVT_Scalar : OpndItins<
  IIC_SSE_CVT_Scalar_RR, IIC_SSE_CVT_Scalar_RM
>;

let Sched = WriteCvtF2I in
def SSE_CVT_SS2SI_32 : OpndItins<
  IIC_SSE_CVT_SS2SI32_RR, IIC_SSE_CVT_SS2SI32_RM
>;

let Sched = WriteCvtF2I in
def SSE_CVT_SS2SI_64 : OpndItins<
  IIC_SSE_CVT_SS2SI64_RR, IIC_SSE_CVT_SS2SI64_RM
>;

let Sched = WriteCvtF2I in
def SSE_CVT_SD2SI : OpndItins<
  IIC_SSE_CVT_SD2SI_RR, IIC_SSE_CVT_SD2SI_RM
>;

multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                     SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                     string asm, OpndItins itins> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (OpNode SrcRC:$src))],
              itins.rr>, Sched<[itins.Sched]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))],
              itins.rm>, Sched<[itins.Sched.Folded]>;
}

multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                       X86MemOperand x86memop, string asm, Domain d,
                       OpndItins itins> {
let hasSideEffects = 0 in {
  def rr : I<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
             [], itins.rr, d>, Sched<[itins.Sched]>;
  let mayLoad = 1 in
  def rm : I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
             [], itins.rm, d>, Sched<[itins.Sched.Folded]>;
}
}

multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
let hasSideEffects = 0, Predicates = [UseAVX] in {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
              Sched<[WriteCvtI2F]>;
  let mayLoad = 1 in
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
              Sched<[WriteCvtI2FLd, ReadAfterLd]>;
} // hasSideEffects = 0
}

let Predicates = [UseAVX] in {
defm VCVTTSS2SI   : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                                "cvttss2si\t{$src, $dst|$dst, $src}",
                                SSE_CVT_SS2SI_32>,
                                XS, VEX, VEX_LIG;
defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                                "cvttss2si\t{$src, $dst|$dst, $src}",
                                SSE_CVT_SS2SI_64>,
                                XS, VEX, VEX_W, VEX_LIG;
defm VCVTTSD2SI   : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                                "cvttsd2si\t{$src, $dst|$dst, $src}",
                                SSE_CVT_SD2SI>,
                                XD, VEX, VEX_LIG;
defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                                "cvttsd2si\t{$src, $dst|$dst, $src}",
                                SSE_CVT_SD2SI>,
                                XD, VEX, VEX_W, VEX_LIG;

def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
                (VCVTTSS2SIrr GR32:$dst, FR32:$src), 0>;
def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
                (VCVTTSS2SIrm GR32:$dst, f32mem:$src), 0>;
def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
                (VCVTTSD2SIrr GR32:$dst, FR64:$src), 0>;
def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
                (VCVTTSD2SIrm GR32:$dst, f64mem:$src), 0>;
def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
                (VCVTTSS2SI64rr GR64:$dst, FR32:$src), 0>;
def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
                (VCVTTSS2SI64rm GR64:$dst, f32mem:$src), 0>;
def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
                (VCVTTSD2SI64rr GR64:$dst, FR64:$src), 0>;
def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
                (VCVTTSD2SI64rm GR64:$dst, f64mem:$src), 0>;

// The assembler can recognize rr 64-bit instructions by seeing an rxx
// register, but the same isn't true when only using memory operands. Provide
// other assembly "l" and "q" forms to address this explicitly where it is
// appropriate to do so.
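// For example (illustrative AT&T-syntax assembly, not definitions from this
// file): with a register source the operand width is implied by the GPR name,
//   cvtsi2ss  %eax, %xmm0     // 32-bit source, no suffix needed
//   cvtsi2ss  %rax, %xmm0     // 64-bit source, no suffix needed
// but a bare memory operand is ambiguous, so the suffixed forms are required:
//   cvtsi2ssl (%rax), %xmm0   // load 32 bits
//   cvtsi2ssq (%rax), %xmm0   // load 64 bits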
defm VCVTSI2SS   : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss{l}">,
                                  XS, VEX_4V, VEX_LIG;
defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">,
                                  XS, VEX_4V, VEX_W, VEX_LIG;
defm VCVTSI2SD   : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">,
                                  XD, VEX_4V, VEX_LIG;
defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">,
                                  XD, VEX_4V, VEX_W, VEX_LIG;
}
let Predicates = [UseAVX] in {
  def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
                  (VCVTSI2SSrm FR64:$dst, FR64:$src1, i32mem:$src), 0>;
  def : InstAlias<"vcvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}",
                  (VCVTSI2SDrm FR64:$dst, FR64:$src1, i32mem:$src), 0>;

  def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
            (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
            (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;

  def : Pat<(f32 (sint_to_fp GR32:$src)),
            (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f32 (sint_to_fp GR64:$src)),
            (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
  def : Pat<(f64 (sint_to_fp GR32:$src)),
            (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f64 (sint_to_fp GR64:$src)),
            (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
}

defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      SSE_CVT_SS2SI_32>, XS;
defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      SSE_CVT_SS2SI_64>, XS, REX_W;
defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si\t{$src, $dst|$dst, $src}",
                      SSE_CVT_SD2SI>, XD;
defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si\t{$src, $dst|$dst, $src}",
                      SSE_CVT_SD2SI>, XD, REX_W;
defm CVTSI2SS  : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
                      "cvtsi2ss{l}\t{$src, $dst|$dst, $src}",
                      SSE_CVT_Scalar>, XS;
defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
                      "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                      SSE_CVT_Scalar>, XS, REX_W;
defm CVTSI2SD  : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
                      "cvtsi2sd{l}\t{$src, $dst|$dst, $src}",
                      SSE_CVT_Scalar>, XD;
defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
                      "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                      SSE_CVT_Scalar>, XD, REX_W;

def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
                (CVTTSS2SIrr GR32:$dst, FR32:$src), 0>;
def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
                (CVTTSS2SIrm GR32:$dst, f32mem:$src), 0>;
def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
                (CVTTSD2SIrr GR32:$dst, FR64:$src), 0>;
def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
                (CVTTSD2SIrm GR32:$dst, f64mem:$src), 0>;
def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
                (CVTTSS2SI64rr GR64:$dst, FR32:$src), 0>;
def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
                (CVTTSS2SI64rm GR64:$dst, f32mem:$src), 0>;
def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                (CVTTSD2SI64rr GR64:$dst, FR64:$src), 0>;
def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                (CVTTSD2SI64rm GR64:$dst, f64mem:$src), 0>;

def : InstAlias<"cvtsi2ss\t{$src, $dst|$dst, $src}",
                (CVTSI2SSrm FR64:$dst, i32mem:$src), 0>;
def : InstAlias<"cvtsi2sd\t{$src, $dst|$dst, $src}",
                (CVTSI2SDrm FR64:$dst, i32mem:$src), 0>;

// Conversion Instructions Intrinsics - Match intrinsics which expect MM
// and/or XMM operand(s).

multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
                          string asm, OpndItins itins> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int SrcRC:$src))], itins.rr>,
           Sched<[itins.Sched]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int mem_cpat:$src))], itins.rm>,
           Sched<[itins.Sched.Folded]>;
}

multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
                    RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
                    PatFrag ld_frag, string asm, OpndItins itins,
                    bit Is2Addr = 1> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))],
              itins.rr>, Sched<[itins.Sched]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))],
              itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

let Predicates = [UseAVX] in {
defm VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32,
                  int_x86_sse2_cvtsd2si, sdmem, sse_load_f64, "cvtsd2si",
                  SSE_CVT_SD2SI>, XD, VEX, VEX_LIG;
defm VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
                  int_x86_sse2_cvtsd2si64, sdmem, sse_load_f64, "cvtsd2si",
                  SSE_CVT_SD2SI>, XD, VEX, VEX_W, VEX_LIG;
}
defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                 sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD;
defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
                   sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD, REX_W;

let isCodeGenOnly = 1 in {
  let Predicates = [UseAVX] in {
  defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
            int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
            SSE_CVT_Scalar, 0>, XS, VEX_4V;
  defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
            int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
            SSE_CVT_Scalar, 0>, XS, VEX_4V,
            VEX_W;
  defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
            int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
            SSE_CVT_Scalar, 0>, XD, VEX_4V;
  defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
            int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
            SSE_CVT_Scalar, 0>, XD,
            VEX_4V, VEX_W;
  }
  let Constraints = "$src1 = $dst" in {
    defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                          int_x86_sse_cvtsi2ss, i32mem, loadi32,
                          "cvtsi2ss{l}", SSE_CVT_Scalar>, XS;
    defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                          int_x86_sse_cvtsi642ss, i64mem, loadi64,
                          "cvtsi2ss{q}", SSE_CVT_Scalar>, XS, REX_W;
    defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                          int_x86_sse2_cvtsi2sd, i32mem, loadi32,
                          "cvtsi2sd{l}", SSE_CVT_Scalar>, XD;
    defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                          int_x86_sse2_cvtsi642sd, i64mem, loadi64,
                          "cvtsi2sd{q}", SSE_CVT_Scalar>, XD, REX_W;
  }
} // isCodeGenOnly = 1

// Aliases for intrinsics
let isCodeGenOnly = 1 in {
let Predicates = [UseAVX] in {
defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                     ssmem, sse_load_f32, "cvttss2si",
                                     SSE_CVT_SS2SI_32>, XS, VEX;
defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                   int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
                                   "cvttss2si", SSE_CVT_SS2SI_64>,
                                   XS, VEX, VEX_W;
defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                     sdmem, sse_load_f64, "cvttsd2si",
                                     SSE_CVT_SD2SI>, XD, VEX;
defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                  int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
                                  "cvttsd2si", SSE_CVT_SD2SI>,
                                  XD, VEX, VEX_W;
}
defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                    ssmem, sse_load_f32, "cvttss2si",
                                    SSE_CVT_SS2SI_32>, XS;
defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                   int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
                                   "cvttss2si", SSE_CVT_SS2SI_64>, XS, REX_W;
defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                    sdmem, sse_load_f64, "cvttsd2si",
                                    SSE_CVT_SD2SI>, XD;
defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                  int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
                                  "cvttsd2si", SSE_CVT_SD2SI>, XD, REX_W;
} // isCodeGenOnly = 1

let Predicates = [UseAVX] in {
defm VCVTSS2SI   : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
                                  ssmem, sse_load_f32, "cvtss2si",
                                  SSE_CVT_SS2SI_32>, XS, VEX, VEX_LIG;
defm VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
                                  ssmem, sse_load_f32, "cvtss2si",
                                  SSE_CVT_SS2SI_64>, XS, VEX, VEX_W, VEX_LIG;
}
defm CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
                               ssmem, sse_load_f32, "cvtss2si",
                               SSE_CVT_SS2SI_32>, XS;
defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
                                 ssmem, sse_load_f32, "cvtss2si",
                                 SSE_CVT_SS2SI_64>, XS, REX_W;

defm VCVTDQ2PS   : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
                               "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                               SSEPackedSingle, SSE_CVT_PS>,
                               PS, VEX, Requires<[HasAVX]>;
defm VCVTDQ2PSY  : sse12_cvt_p<0x5B, VR256, VR256, i256mem,
                               "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                               SSEPackedSingle, SSE_CVT_PS>,
                               PS, VEX, VEX_L, Requires<[HasAVX]>;

defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
                            "cvtdq2ps\t{$src, $dst|$dst, $src}",
                            SSEPackedSingle, SSE_CVT_PS>,
                            PS, Requires<[UseSSE2]>;

let Predicates = [UseAVX] in {
def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
                (VCVTSS2SIrr GR32:$dst, VR128:$src), 0>;
def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
                (VCVTSS2SIrm GR32:$dst, ssmem:$src), 0>;
def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
                (VCVTSD2SIrr GR32:$dst, VR128:$src), 0>;
def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
                (VCVTSD2SIrm GR32:$dst, sdmem:$src), 0>;
def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
                (VCVTSS2SI64rr GR64:$dst, VR128:$src), 0>;
def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
                (VCVTSS2SI64rm GR64:$dst, ssmem:$src), 0>;
def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
                (VCVTSD2SI64rr GR64:$dst, VR128:$src), 0>;
def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
                (VCVTSD2SI64rm GR64:$dst, sdmem:$src), 0>;
}

def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
                (CVTSS2SIrr GR32:$dst, VR128:$src), 0>;
def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
                (CVTSS2SIrm GR32:$dst, ssmem:$src), 0>;
def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
                (CVTSD2SIrr GR32:$dst, VR128:$src), 0>;
def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
                (CVTSD2SIrm GR32:$dst, sdmem:$src), 0>;
def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
                (CVTSS2SI64rr GR64:$dst, VR128:$src), 0>;
def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
                (CVTSS2SI64rm GR64:$dst, ssmem:$src), 0>;
def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                (CVTSD2SI64rr GR64:$dst, VR128:$src), 0>;
def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                (CVTSD2SI64rm GR64:$dst, sdmem:$src), 0>;

// Convert scalar double to scalar single
let hasSideEffects = 0, Predicates = [UseAVX] in {
def VCVTSD2SSrr  : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
                        (ins FR64:$src1, FR64:$src2),
                        "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
                        IIC_SSE_CVT_Scalar_RR>, VEX_4V, VEX_LIG,
                        Sched<[WriteCvtF2F]>;

def VCVTSD2SSrm  : I<0x5A, MRMSrcMem, (outs FR32:$dst),
                     (ins FR64:$src1, f64mem:$src2),
                     "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [], IIC_SSE_CVT_Scalar_RM>,
                     XD, Requires<[HasAVX, OptForSize]>, VEX_4V, VEX_LIG,
                     Sched<[WriteCvtF2FLd, ReadAfterLd]>;
}

def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
          Requires<[UseAVX]>;

def CVTSD2SSrr  : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround FR64:$src))],
                      IIC_SSE_CVT_Scalar_RR>, Sched<[WriteCvtF2F]>;
def CVTSD2SSrm  : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
                    "cvtsd2ss\t{$src, $dst|$dst, $src}",
                    [(set FR32:$dst, (fround (loadf64 addr:$src)))],
                    IIC_SSE_CVT_Scalar_RM>,
                    XD,
                    Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;

let isCodeGenOnly = 1 in {
def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                       "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       [(set VR128:$dst,
                         (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
                       IIC_SSE_CVT_Scalar_RR>, XD, VEX_4V, Requires<[HasAVX]>,
                       Sched<[WriteCvtF2F]>;
def Int_VCVTSD2SSrm: I<0x5A, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
                       "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
                                          VR128:$src1, sse_load_f64:$src2))],
                       IIC_SSE_CVT_Scalar_RM>, XD, VEX_4V, Requires<[HasAVX]>,
                       Sched<[WriteCvtF2FLd, ReadAfterLd]>;

let Constraints = "$src1 = $dst" in {
def Int_CVTSD2SSrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
                      IIC_SSE_CVT_Scalar_RR>, XD, Requires<[UseSSE2]>,
                      Sched<[WriteCvtF2F]>;
def Int_CVTSD2SSrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
                      "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
                                         VR128:$src1, sse_load_f64:$src2))],
                      IIC_SSE_CVT_Scalar_RM>, XD, Requires<[UseSSE2]>,
                      Sched<[WriteCvtF2FLd, ReadAfterLd]>;
}
} // isCodeGenOnly = 1

// Convert scalar single to scalar double
// SSE2 instructions with XS prefix
let hasSideEffects = 0, Predicates = [UseAVX] in {
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
                    (ins FR32:$src1, FR32:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [], IIC_SSE_CVT_Scalar_RR>,
                    XS, Requires<[HasAVX]>, VEX_4V, VEX_LIG,
                    Sched<[WriteCvtF2F]>;

def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
                    (ins FR32:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [], IIC_SSE_CVT_Scalar_RM>,
                    XS, VEX_4V, VEX_LIG, Requires<[HasAVX, OptForSize]>,
                    Sched<[WriteCvtF2FLd, ReadAfterLd]>;
}

def : Pat<(f64 (fextend FR32:$src)),
          (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[UseAVX]>;
def : Pat<(fextend (loadf32 addr:$src)),
          (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[UseAVX]>;

def : Pat<(extloadf32 addr:$src),
          (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>,
      Requires<[UseAVX, OptForSize]>;
def : Pat<(extloadf32 addr:$src),
          (VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (VMOVSSrm addr:$src))>,
      Requires<[UseAVX, OptForSpeed]>;

def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (fextend FR32:$src))],
                   IIC_SSE_CVT_Scalar_RR>, XS,
                   Requires<[UseSSE2]>, Sched<[WriteCvtF2F]>;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (extloadf32 addr:$src))],
                   IIC_SSE_CVT_Scalar_RM>, XS,
                   Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;

// extload f32 -> f64. This matches load+fextend because we have a hack in
// the isel (PreprocessForFPConvert) that can introduce loads after dag
// combine. Since these loads aren't folded into the fextend, we have to match
// it explicitly here.
def : Pat<(fextend (loadf32 addr:$src)),
          (CVTSS2SDrm addr:$src)>, Requires<[UseSSE2]>;
def : Pat<(extloadf32 addr:$src),
          (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[UseSSE2, OptForSpeed]>;
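
// For illustration (a hedged sketch of the IR these patterns cover; the
// function is hypothetical, not taken from this file):
//   define double @ext(float* %p) {
//     %f = load float, float* %p
//     %d = fpext float %f to double
//     ret double %d
//   }
// With OptForSize the folded CVTSS2SDrm is selected; otherwise the
// OptForSpeed pattern above selects MOVSSrm followed by CVTSS2SDrr.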

let isCodeGenOnly = 1 in {
def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst,
                      (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
                    IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, Requires<[HasAVX]>,
                    Sched<[WriteCvtF2F]>;
def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst,
                      (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
                    IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, Requires<[HasAVX]>,
                    Sched<[WriteCvtF2FLd, ReadAfterLd]>;
let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
                    IIC_SSE_CVT_Scalar_RR>, XS, Requires<[UseSSE2]>,
                    Sched<[WriteCvtF2F]>;
def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                    "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
                    IIC_SSE_CVT_Scalar_RM>, XS, Requires<[UseSSE2]>,
                    Sched<[WriteCvtF2FLd, ReadAfterLd]>;
}
} // isCodeGenOnly = 1

// Convert packed single/double fp to doubleword
def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
                       IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (int_x86_sse2_cvtps2dq (loadv4f32 addr:$src)))],
                       IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR256:$dst,
                          (int_x86_avx_cvt_ps2dq_256 VR256:$src))],
                        IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR256:$dst,
                          (int_x86_avx_cvt_ps2dq_256 (loadv8f32 addr:$src)))],
                        IIC_SSE_CVT_PS_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
                     IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))],
                     IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;

// Convert Packed Double FP to Packed DW Integers
let Predicates = [HasAVX] in {
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
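// For example (illustrative AT&T-syntax assembly, not definitions from this
// file): with register operands the source width is implied,
//   vcvtpd2dq %xmm1, %xmm0    // 128-bit source
//   vcvtpd2dq %ymm1, %xmm0    // 256-bit source
// but a bare memory operand is ambiguous, so the suffixed forms below exist:
//   vcvtpd2dqx (%rax), %xmm0  // read 128 bits
//   vcvtpd2dqy (%rax), %xmm0  // read 256 bits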
def VCVTPD2DQrr  : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                       VEX, Sched<[WriteCvtF2I]>;

// XMM only
def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
                (VCVTPD2DQrr VR128:$dst, VR128:$src), 0>;
def VCVTPD2DQXrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "vcvtpd2dqx\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (int_x86_sse2_cvtpd2dq (loadv2f64 addr:$src)))]>, VEX,
                       Sched<[WriteCvtF2ILd]>;

// YMM only
def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                       "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (int_x86_avx_cvt_pd2dq_256 VR256:$src))]>, VEX, VEX_L,
                       Sched<[WriteCvtF2I]>;
def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                       "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (int_x86_avx_cvt_pd2dq_256 (loadv4f64 addr:$src)))]>,
                       VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
def : InstAlias<"vcvtpd2dq\t{$src, $dst|$dst, $src}",
                (VCVTPD2DQYrr VR128:$dst, VR256:$src), 0>;
}

def CVTPD2DQrm  : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvtpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))],
                      IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2ILd]>;
def CVTPD2DQrr  : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvtpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))],
                      IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>;

// Convert with truncation packed single/double fp to doubleword
// SSE2 packed instructions with XS prefix
def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (int_x86_sse2_cvttps2dq VR128:$src))],
                         IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvttps2dq
                                            (loadv4f32 addr:$src)))],
                         IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                          "cvttps2dq\t{$src, $dst|$dst, $src}",
                          [(set VR256:$dst,
                            (int_x86_avx_cvtt_ps2dq_256 VR256:$src))],
                          IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                          "cvttps2dq\t{$src, $dst|$dst, $src}",
                          [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256
                                             (loadv8f32 addr:$src)))],
                          IIC_SSE_CVT_PS_RM>, VEX, VEX_L,
                          Sched<[WriteCvtF2ILd]>;

def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvttps2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))],
                       IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvttps2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (int_x86_sse2_cvttps2dq (memopv4f32 addr:$src)))],
                       IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
            (VCVTDQ2PSrr VR128:$src)>;
  def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (loadv2i64 addr:$src))),
            (VCVTDQ2PSrm addr:$src)>;
}

let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
            (VCVTDQ2PSrr VR128:$src)>;
  def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))),
            (VCVTDQ2PSrm addr:$src)>;

  def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
            (VCVTTPS2DQrr VR128:$src)>;
  def : Pat<(v4i32 (fp_to_sint (loadv4f32 addr:$src))),
            (VCVTTPS2DQrm addr:$src)>;

  def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
            (VCVTDQ2PSYrr VR256:$src)>;
  def : Pat<(v8f32 (sint_to_fp (bc_v8i32 (loadv4i64 addr:$src)))),
            (VCVTDQ2PSYrm addr:$src)>;

  def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
            (VCVTTPS2DQYrr VR256:$src)>;
  def : Pat<(v8i32 (fp_to_sint (loadv8f32 addr:$src))),
            (VCVTTPS2DQYrm addr:$src)>;
}

let Predicates = [UseSSE2] in {
  def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
            (CVTDQ2PSrr VR128:$src)>;
  def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
            (CVTDQ2PSrm addr:$src)>;

  def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
            (CVTDQ2PSrr VR128:$src)>;
  def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (memopv2i64 addr:$src))),
            (CVTDQ2PSrm addr:$src)>;

  def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
            (CVTTPS2DQrr VR128:$src)>;
  def : Pat<(v4i32 (fp_to_sint (memopv4f32 addr:$src))),
            (CVTTPS2DQrm addr:$src)>;
}

def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvttpd2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (int_x86_sse2_cvttpd2dq VR128:$src))],
                        IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2I]>;

// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.

// XMM only
def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
                (VCVTTPD2DQrr VR128:$dst, VR128:$src), 0>;
def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvttpd2dqx\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                            (loadv2f64 addr:$src)))],
                         IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2ILd]>;

// YMM only
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                         "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (int_x86_avx_cvtt_pd2dq_256 VR256:$src))],
                         IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                         "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (int_x86_avx_cvtt_pd2dq_256 (loadv4f64 addr:$src)))],
                         IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}",
                (VCVTTPD2DQYrr VR128:$dst, VR256:$src), 0>;

let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
            (VCVTTPD2DQYrr VR256:$src)>;
  def : Pat<(v4i32 (fp_to_sint (loadv4f64 addr:$src))),
            (VCVTTPD2DQYrm addr:$src)>;
} // Predicates = [HasAVX, NoVLX]

def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))],
                      IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>;
def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                         (memopv2f64 addr:$src)))],
                      IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2ILd]>;

// Convert packed single to packed double
let Predicates = [HasAVX] in {
// SSE2 instructions without OpSize prefix
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    "vcvtps2pd\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
                    IIC_SSE_CVT_PD_RR>, PS, VEX, Sched<[WriteCvtF2F]>;
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                    "vcvtps2pd\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
                    IIC_SSE_CVT_PD_RM>, PS, VEX, Sched<[WriteCvtF2FLd]>;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}",
                     [(set VR256:$dst,
                       (int_x86_avx_cvt_ps2_pd_256 VR128:$src))],
                     IIC_SSE_CVT_PD_RR>, PS, VEX, VEX_L, Sched<[WriteCvtF2F]>;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}",
                     [(set VR256:$dst,
                       (int_x86_avx_cvt_ps2_pd_256 (loadv4f32 addr:$src)))],
                     IIC_SSE_CVT_PD_RM>, PS, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
}

let Predicates = [UseSSE2] in {
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
                   IIC_SSE_CVT_PD_RR>, PS, Sched<[WriteCvtF2F]>;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
                   IIC_SSE_CVT_PD_RM>, PS, Sched<[WriteCvtF2FLd]>;
}

// Convert Packed DW Integers to Packed Double FP
let Predicates = [HasAVX] in {
let hasSideEffects = 0, mayLoad = 1 in
def VCVTDQ2PDrm  : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                        "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                        []>, VEX, Sched<[WriteCvtI2FLd]>;
def VCVTDQ2PDrr  : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (int_x86_sse2_cvtdq2pd VR128:$src))]>, VEX,
                        Sched<[WriteCvtI2F]>;
def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
                        "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                        [(set VR256:$dst,
                          (int_x86_avx_cvtdq2_pd_256
                           (bitconvert (loadv2i64 addr:$src))))]>, VEX, VEX_L,
                        Sched<[WriteCvtI2FLd]>;
def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                        "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                        [(set VR256:$dst,
                          (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX, VEX_L,
                        Sched<[WriteCvtI2F]>;
}

let hasSideEffects = 0, mayLoad = 1 in
def CVTDQ2PDrm  : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
                       IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtI2FLd]>;
def CVTDQ2PDrr  : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))],
                       IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtI2F]>;

// AVX register conversion intrinsics
let Predicates = [HasAVX] in {
  def : Pat<(v2f64 (X86cvtdq2pd (v4i32 VR128:$src))),
            (VCVTDQ2PDrr VR128:$src)>;
  def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (loadv2i64 addr:$src)))),
            (VCVTDQ2PDrm addr:$src)>;

  def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
            (VCVTDQ2PDYrr VR128:$src)>;
  def : Pat<(v4f64 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))),
            (VCVTDQ2PDYrm addr:$src)>;
} // Predicates = [HasAVX]

// SSE2 register conversion intrinsics
let Predicates = [HasSSE2] in {
  def : Pat<(v2f64 (X86cvtdq2pd (v4i32 VR128:$src))),
            (CVTDQ2PDrr VR128:$src)>;
  def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (loadv2i64 addr:$src)))),
            (CVTDQ2PDrm addr:$src)>;
} // Predicates = [HasSSE2]

// Convert packed double to packed single
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
                       IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2F]>;

// XMM only
def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
                (VCVTPD2PSrr VR128:$dst, VR128:$src), 0>;
def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvtpd2psx\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (int_x86_sse2_cvtpd2ps (loadv2f64 addr:$src)))],
                        IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2FLd]>;

// YMM only
def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                        "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (int_x86_avx_cvt_pd2_ps_256 VR256:$src))],
                        IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2F]>;
def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                        "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (int_x86_avx_cvt_pd2_ps_256 (loadv4f64 addr:$src)))],
                        IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
def : InstAlias<"vcvtpd2ps\t{$src, $dst|$dst, $src}",
                (VCVTPD2PSYrr VR128:$dst, VR256:$src), 0>;

def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
                     IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2F]>;
def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))],
                     IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2FLd]>;

// AVX 256-bit register conversion intrinsics
// FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
// whenever possible to avoid declaring two versions of each one.
let Predicates = [HasAVX] in {
  def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
            (VCVTDQ2PSYrr VR256:$src)>;
  def : Pat<(int_x86_avx_cvtdq2_ps_256 (bitconvert (loadv4i64 addr:$src))),
            (VCVTDQ2PSYrm addr:$src)>;
}

let Predicates = [HasAVX, NoVLX] in {
  // Match fround and fextend for 128/256-bit conversions
  def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
            (VCVTPD2PSrr VR128:$src)>;
  def : Pat<(v4f32 (X86vfpround (loadv2f64 addr:$src))),
            (VCVTPD2PSXrm addr:$src)>;
  def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
            (VCVTPD2PSYrr VR256:$src)>;
  def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
            (VCVTPD2PSYrm addr:$src)>;

  def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
            (VCVTPS2PDrr VR128:$src)>;
  def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
            (VCVTPS2PDYrr VR128:$src)>;
  def : Pat<(v4f64 (extloadv4f32 addr:$src)),
            (VCVTPS2PDYrm addr:$src)>;
}

let Predicates = [UseSSE2] in {
  // Match fround and fextend for 128-bit conversions
  def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
            (CVTPD2PSrr VR128:$src)>;
  def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))),
            (CVTPD2PSrm addr:$src)>;

  def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
            (CVTPS2PDrr VR128:$src)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Compare Instructions
//===----------------------------------------------------------------------===//

// sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
                            Operand CC, SDNode OpNode, ValueType VT,
                            PatFrag ld_frag, string asm, string asm_alt,
                            OpndItins itins, ImmLeaf immLeaf> {
  def rr : SIi8<0xC2, MRMSrcReg,
                (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
                [(set RC:$dst, (OpNode (VT RC:$src1), RC:$src2, immLeaf:$cc))],
                itins.rr>, Sched<[itins.Sched]>;
  def rm : SIi8<0xC2, MRMSrcMem,
                (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
                [(set RC:$dst, (OpNode (VT RC:$src1),
                                       (ld_frag addr:$src2), immLeaf:$cc))],
                itins.rm>,
                Sched<[itins.Sched.Folded, ReadAfterLd]>;

  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1, hasSideEffects = 0 in {
    def rr_alt : SIi8<0xC2, MRMSrcReg, (outs RC:$dst),
                      (ins RC:$src1, RC:$src2, u8imm:$cc), asm_alt, [],
                      IIC_SSE_ALU_F32S_RR>, Sched<[itins.Sched]>;
    let mayLoad = 1 in
    def rm_alt : SIi8<0xC2, MRMSrcMem, (outs RC:$dst),
                      (ins RC:$src1, x86memop:$src2, u8imm:$cc), asm_alt, [],
                      IIC_SSE_ALU_F32S_RM>,
                      Sched<[itins.Sched.Folded, ReadAfterLd]>;
  }
}
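
// For example (illustrative AT&T-syntax assembly, not definitions from this
// file): the mnemonic form and the explicit-immediate form are equivalent;
// the predicate is encoded in the low bits of the imm8 (0 = eq, 4 = neq, ...):
//   cmpeqss  %xmm1, %xmm0     // same encoding as: cmpss $0, %xmm1, %xmm0
//   cmpneqss %xmm1, %xmm0     // same encoding as: cmpss $4, %xmm1, %xmm0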

defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, AVXCC, X86cmps, f32, loadf32,
                 "cmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                 "cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
                 SSE_ALU_F32S, i8immZExt5>, XS, VEX_4V, VEX_LIG;
defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, AVXCC, X86cmps, f64, loadf64,
                 "cmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                 "cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
                 SSE_ALU_F32S, i8immZExt5>, // same latency as 32 bit compare
                 XD, VEX_4V, VEX_LIG;

let Constraints = "$src1 = $dst" in {
  defm CMPSS : sse12_cmp_scalar<FR32, f32mem, SSECC, X86cmps, f32, loadf32,
                  "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
                  "cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}", SSE_ALU_F32S,
                  i8immZExt3>, XS;
  defm CMPSD : sse12_cmp_scalar<FR64, f64mem, SSECC, X86cmps, f64, loadf64,
                  "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
                  "cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
                  SSE_ALU_F64S, i8immZExt3>, XD;
}

multiclass sse12_cmp_scalar_int<X86MemOperand x86memop, Operand CC,
                         Intrinsic Int, string asm, OpndItins itins,
                         ImmLeaf immLeaf> {
  def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
                (ins VR128:$src1, VR128:$src, CC:$cc), asm,
                [(set VR128:$dst, (Int VR128:$src1,
                                       VR128:$src, immLeaf:$cc))],
                itins.rr>,
           Sched<[itins.Sched]>;
  def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
                (ins VR128:$src1, x86memop:$src, CC:$cc), asm,
                [(set VR128:$dst, (Int VR128:$src1,
                                       (load addr:$src), immLeaf:$cc))],
                itins.rm>,
           Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

let isCodeGenOnly = 1 in {
  // Aliases to match intrinsics which expect XMM operand(s).
  defm Int_VCMPSS : sse12_cmp_scalar_int<f32mem, AVXCC, int_x86_sse_cmp_ss,
                       "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
                       SSE_ALU_F32S, i8immZExt5>,
                       XS, VEX_4V;
  defm Int_VCMPSD : sse12_cmp_scalar_int<f64mem, AVXCC, int_x86_sse2_cmp_sd,
                       "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
                       SSE_ALU_F32S, i8immZExt5>, // same latency as f32
                       XD, VEX_4V;
  let Constraints = "$src1 = $dst" in {
    defm Int_CMPSS : sse12_cmp_scalar_int<f32mem, SSECC, int_x86_sse_cmp_ss,
                         "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                         SSE_ALU_F32S, i8immZExt3>, XS;
    defm Int_CMPSD : sse12_cmp_scalar_int<f64mem, SSECC, int_x86_sse2_cmp_sd,
                         "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                         SSE_ALU_F64S, i8immZExt3>,
                         XD;
  }
} // isCodeGenOnly = 1

// sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
                         ValueType vt, X86MemOperand x86memop,
                         PatFrag ld_frag, string OpcodeStr> {
  def rr: SI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
             [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))],
             IIC_SSE_COMIS_RR>,
          Sched<[WriteFAdd]>;
  def rm: SI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
             [(set EFLAGS, (OpNode (vt RC:$src1),
                                   (ld_frag addr:$src2)))],
             IIC_SSE_COMIS_RM>,
          Sched<[WriteFAddLd, ReadAfterLd]>;
}
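
// For reference (a summary of the architectural behavior, not a definition
// from this file): ucomiss/ucomisd compare src1 with src2 and set ZF/PF/CF:
//   src1 > src2   -> ZF=0 PF=0 CF=0
//   src1 < src2   -> ZF=0 PF=0 CF=1
//   src1 == src2  -> ZF=1 PF=0 CF=0
//   unordered     -> ZF=1 PF=1 CF=1
// OF/SF/AF are cleared. comiss/comisd differ only in signaling #IA on QNaN.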

let Defs = [EFLAGS] in {
  defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
                                "ucomiss">, PS, VEX, VEX_LIG;
  defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
                                "ucomisd">, PD, VEX, VEX_LIG;
  let Pattern = []<dag> in {
    defm VCOMISS : sse12_ord_cmp<0x2F, FR32, undef, f32, f32mem, loadf32,
                                 "comiss">, PS, VEX, VEX_LIG;
    defm VCOMISD : sse12_ord_cmp<0x2F, FR64, undef, f64, f64mem, loadf64,
                                 "comisd">, PD, VEX, VEX_LIG;
  }

  let isCodeGenOnly = 1 in {
    defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
                                      load, "ucomiss">, PS, VEX;
    defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
                                      load, "ucomisd">, PD, VEX;

    defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
                                     load, "comiss">, PS, VEX;
    defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
                                     load, "comisd">, PD, VEX;
  }
  defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
                               "ucomiss">, PS;
  defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
                               "ucomisd">, PD;

  let Pattern = []<dag> in {
    defm COMISS : sse12_ord_cmp<0x2F, FR32, undef, f32, f32mem, loadf32,
                                "comiss">, PS;
    defm COMISD : sse12_ord_cmp<0x2F, FR64, undef, f64, f64mem, loadf64,
                                "comisd">, PD;
  }

  let isCodeGenOnly = 1 in {
    defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
                                     load, "ucomiss">, PS;
    defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
                                     load, "ucomisd">, PD;

    defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
                                    "comiss">, PS;
    defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
                                    "comisd">, PD;
  }
} // Defs = [EFLAGS]

// sse12_cmp_packed - sse 1 & 2 compare packed instructions
multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
                            Operand CC, Intrinsic Int, string asm,
                            string asm_alt, Domain d, ImmLeaf immLeaf,
                            PatFrag ld_frag, OpndItins itins = SSE_ALU_F32P> {
  let isCommutable = 1 in
  def rri : PIi8<0xC2, MRMSrcReg,
             (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
             [(set RC:$dst, (Int RC:$src1, RC:$src2, immLeaf:$cc))],
             itins.rr, d>,
            Sched<[WriteFAdd]>;
  def rmi : PIi8<0xC2, MRMSrcMem,
             (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
             [(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2), immLeaf:$cc))],
             itins.rm, d>,
            Sched<[WriteFAddLd, ReadAfterLd]>;

  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1, hasSideEffects = 0 in {
    def rri_alt : PIi8<0xC2, MRMSrcReg,
               (outs RC:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc),
               asm_alt, [], itins.rr, d>, Sched<[WriteFAdd]>;
    let mayLoad = 1 in
    def rmi_alt : PIi8<0xC2, MRMSrcMem,
               (outs RC:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc),
               asm_alt, [], itins.rm, d>,
               Sched<[WriteFAddLd, ReadAfterLd]>;
  }
}

defm VCMPPS : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse_cmp_ps,
               "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
               SSEPackedSingle, i8immZExt5, loadv4f32>, PS, VEX_4V;
defm VCMPPD : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse2_cmp_pd,
               "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
               SSEPackedDouble, i8immZExt5, loadv2f64>, PD, VEX_4V;
defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_ps_256,
               "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
               SSEPackedSingle, i8immZExt5, loadv8f32>, PS, VEX_4V, VEX_L;
defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_pd_256,
               "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
               SSEPackedDouble, i8immZExt5, loadv4f64>, PD, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in {
  defm CMPPS : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse_cmp_ps,
                 "cmp${cc}ps\t{$src2, $dst|$dst, $src2}",
                 "cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
                 SSEPackedSingle, i8immZExt5, memopv4f32, SSE_ALU_F32P>, PS;
  defm CMPPD : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse2_cmp_pd,
                 "cmp${cc}pd\t{$src2, $dst|$dst, $src2}",
                 "cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
                 SSEPackedDouble, i8immZExt5, memopv2f64, SSE_ALU_F64P>, PD;
}

let Predicates = [HasAVX] in {
def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
          (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (loadv4f32 addr:$src2), imm:$cc)),
          (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
          (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (loadv2f64 addr:$src2), imm:$cc)),
          (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;

def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
          (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), (loadv8f32 addr:$src2), imm:$cc)),
          (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
          (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), (loadv4f64 addr:$src2), imm:$cc)),
          (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
}

let Predicates = [UseSSE1] in {
def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
          (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memopv4f32 addr:$src2), imm:$cc)),
          (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
}

let Predicates = [UseSSE2] in {
def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
          (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memopv2f64 addr:$src2), imm:$cc)),
          (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Shuffle Instructions
//===----------------------------------------------------------------------===//

/// sse12_shuffle - sse 1 & 2 fp shuffle instructions
multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
                         ValueType vt, string asm, PatFrag mem_frag,
                         Domain d> {
  def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
                 (ins RC:$src1, x86memop:$src2, u8imm:$src3), asm,
                 [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
                                     (i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
                 Sched<[WriteFShuffleLd, ReadAfterLd]>;
  def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
                 (ins RC:$src1, RC:$src2, u8imm:$src3), asm,
                 [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
                                     (i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
                 Sched<[WriteFShuffle]>;
}
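
// For reference (a summary of the shufps immediate semantics, not a
// definition from this file): each 2-bit field of the imm8 selects one
// element; the low two results come from $src1 and the high two from $src2.
// E.g. in AT&T syntax, where xmm0 is both destination and first source:
//   shufps $0x1b, %xmm1, %xmm0
// yields { xmm0[3], xmm0[2], xmm1[1], xmm1[0] } (element 0 listed first).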

let Predicates = [HasAVX, NoVLX] in {
  defm VSHUFPS  : sse12_shuffle<VR128, f128mem, v4f32,
           "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           loadv4f32, SSEPackedSingle>, PS, VEX_4V;
  defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
           "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           loadv8f32, SSEPackedSingle>, PS, VEX_4V, VEX_L;
  defm VSHUFPD  : sse12_shuffle<VR128, f128mem, v2f64,
           "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           loadv2f64, SSEPackedDouble>, PD, VEX_4V;
  defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
           "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           loadv4f64, SSEPackedDouble>, PD, VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
  defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
                    "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    memopv4f32, SSEPackedSingle>, PS;
  defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
                    "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    memopv2f64, SSEPackedDouble>, PD;
}

let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v4i32 (X86Shufp VR128:$src1,
                    (bc_v4i32 (loadv2i64 addr:$src2)), (i8 imm:$imm))),
            (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;

  def : Pat<(v2i64 (X86Shufp VR128:$src1,
                    (loadv2i64 addr:$src2), (i8 imm:$imm))),
            (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;

  // 256-bit patterns
  def : Pat<(v8i32 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v8i32 (X86Shufp VR256:$src1,
                    (bc_v8i32 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
            (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;

  def : Pat<(v4i64 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v4i64 (X86Shufp VR256:$src1,
                    (loadv4i64 addr:$src2), (i8 imm:$imm))),
            (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
}

let Predicates = [UseSSE1] in {
  def : Pat<(v4i32 (X86Shufp VR128:$src1,
                    (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
            (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
}

let Predicates = [UseSSE2] in {
  // Generic SHUFPD patterns
  def : Pat<(v2i64 (X86Shufp VR128:$src1,
                    (memopv2i64 addr:$src2), (i8 imm:$imm))),
            (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Unpack FP Instructions
//===----------------------------------------------------------------------===//

/// sse12_unpack_interleave - sse 1 & 2 fp unpack and interleave
multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
                                   PatFrag mem_frag, RegisterClass RC,
                                   X86MemOperand x86memop, string asm,
                                   Domain d> {
    def rr : PI<opc, MRMSrcReg,
                (outs RC:$dst), (ins RC:$src1, RC:$src2),
                asm, [(set RC:$dst,
                           (vt (OpNode RC:$src1, RC:$src2)))],
                           IIC_SSE_UNPCK, d>, Sched<[WriteFShuffle]>;
    def rm : PI<opc, MRMSrcMem,
                (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
                asm, [(set RC:$dst,
                           (vt (OpNode RC:$src1,
                                       (mem_frag addr:$src2))))],
                           IIC_SSE_UNPCK, d>,
                Sched<[WriteFShuffleLd, ReadAfterLd]>;
}
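
// For reference (a summary of the unpack semantics, not a definition from
// this file): unpcklps interleaves the low halves of the two sources and
// unpckhps the high halves; with a = $src1 and b = $src2:
//   unpcklps: { a[0], b[0], a[1], b[1] }
//   unpckhps: { a[2], b[2], a[3], b[3] }
// unpcklpd/unpckhpd do the same with one f64 element per half.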

let Predicates = [HasAVX, NoVLX] in {
defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, loadv4f32,
      VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedSingle>, PS, VEX_4V;
defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, loadv2f64,
      VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedDouble>, PD, VEX_4V;
defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, loadv4f32,
      VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedSingle>, PS, VEX_4V;
defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, loadv2f64,
      VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedDouble>, PD, VEX_4V;

defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, loadv8f32,
      VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedSingle>, PS, VEX_4V, VEX_L;
defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, loadv4f64,
      VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedDouble>, PD, VEX_4V, VEX_L;
defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, loadv8f32,
      VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedSingle>, PS, VEX_4V, VEX_L;
defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, loadv4f64,
      VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedDouble>, PD, VEX_4V, VEX_L;
}// Predicates = [HasAVX, NoVLX]
let Constraints = "$src1 = $dst" in {
  defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
        VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
                       SSEPackedSingle>, PS;
  defm UNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
        VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
                       SSEPackedDouble>, PD;
  defm UNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
        VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
                       SSEPackedSingle>, PS;
  defm UNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
        VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
                       SSEPackedDouble>, PD;
} // Constraints = "$src1 = $dst"
2749 let Predicates = [HasAVX1Only] in {
2750 def : Pat<(v8i32 (X86Unpckl VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
2751 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
2752 def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)),
2753 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
2754 def : Pat<(v8i32 (X86Unpckh VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
2755 (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
2756 def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)),
2757 (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
2759 def : Pat<(v4i64 (X86Unpckl VR256:$src1, (loadv4i64 addr:$src2))),
2760 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
2761 def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)),
2762 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
2763 def : Pat<(v4i64 (X86Unpckh VR256:$src1, (loadv4i64 addr:$src2))),
2764 (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
2765 def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)),
2766 (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
2767 }
2769 //===----------------------------------------------------------------------===//
2770 // SSE 1 & 2 - Extract Floating-Point Sign mask
2771 //===----------------------------------------------------------------------===//
2773 /// sse12_extr_sign_mask - sse 1 & 2 packed FP sign mask extraction
2774 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
2775 Domain d> {
2776 def rr : PI<0x50, MRMSrcReg, (outs GR32orGR64:$dst), (ins RC:$src),
2777 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
2778 [(set GR32orGR64:$dst, (Int RC:$src))], IIC_SSE_MOVMSK, d>,
2779 Sched<[WriteVecLogic]>;
2780 }
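//
// Illustrative C usage of the sign-mask extraction defined above (a sketch,
// not generated from this file; the helper name is ours):
//
//   #include <immintrin.h>
//   // movmskps packs the sign bit of each f32 lane into the low 4 bits of a
//   // GPR, e.g. to test whether any lane is negative:
//   int any_negative(__m128 v) { return _mm_movemask_ps(v) != 0; }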
2782 let Predicates = [HasAVX] in {
2783 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
2784 "movmskps", SSEPackedSingle>, PS, VEX;
2785 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
2786 "movmskpd", SSEPackedDouble>, PD, VEX;
2787 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
2788 "movmskps", SSEPackedSingle>, PS,
2790 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
2791 "movmskpd", SSEPackedDouble>, PD,
2794 def : Pat<(i32 (X86fgetsign FR32:$src)),
2795 (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
2796 def : Pat<(i64 (X86fgetsign FR32:$src)),
2797 (SUBREG_TO_REG (i64 0),
2798 (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>;
2799 def : Pat<(i32 (X86fgetsign FR64:$src)),
2800 (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
2801 def : Pat<(i64 (X86fgetsign FR64:$src)),
2802 (SUBREG_TO_REG (i64 0),
2803 (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>;
2804 }
2806 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
2807 SSEPackedSingle>, PS;
2808 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
2809 SSEPackedDouble>, PD;
2811 def : Pat<(i32 (X86fgetsign FR32:$src)),
2812 (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>,
2813 Requires<[UseSSE1]>;
2814 def : Pat<(i64 (X86fgetsign FR32:$src)),
2815 (SUBREG_TO_REG (i64 0),
2816 (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>,
2817 Requires<[UseSSE1]>;
2818 def : Pat<(i32 (X86fgetsign FR64:$src)),
2819 (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>,
2820 Requires<[UseSSE2]>;
2821 def : Pat<(i64 (X86fgetsign FR64:$src)),
2822 (SUBREG_TO_REG (i64 0),
2823 (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>,
2824 Requires<[UseSSE2]>;
2826 //===---------------------------------------------------------------------===//
2827 // SSE2 - Packed Integer Logical Instructions
2828 //===---------------------------------------------------------------------===//
2830 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2832 /// PDI_binop_rm - Simple SSE2 binary operator.
2833 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2834 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
2835 X86MemOperand x86memop, OpndItins itins,
2836 bit IsCommutable, bit Is2Addr> {
2837 let isCommutable = IsCommutable in
2838 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
2839 (ins RC:$src1, RC:$src2),
2840 !if(Is2Addr,
2841 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2842 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2843 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
2844 Sched<[itins.Sched]>;
2845 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
2846 (ins RC:$src1, x86memop:$src2),
2847 !if(Is2Addr,
2848 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2849 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2850 [(set RC:$dst, (OpVT (OpNode RC:$src1,
2851 (bitconvert (memop_frag addr:$src2)))))],
2852 itins.rm>,
2853 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2854 }
2855 } // ExeDomain = SSEPackedInt
2857 multiclass PDI_binop_all<bits<8> opc, string OpcodeStr, SDNode Opcode,
2858 ValueType OpVT128, ValueType OpVT256,
2859 OpndItins itins, bit IsCommutable = 0, Predicate prd> {
2860 let Predicates = [HasAVX, prd] in
2861 defm V#NAME : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode, OpVT128,
2862 VR128, loadv2i64, i128mem, itins, IsCommutable, 0>, VEX_4V;
2864 let Constraints = "$src1 = $dst" in
2865 defm NAME : PDI_binop_rm<opc, OpcodeStr, Opcode, OpVT128, VR128,
2866 memopv2i64, i128mem, itins, IsCommutable, 1>;
2868 let Predicates = [HasAVX2, prd] in
2869 defm V#NAME#Y : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode,
2870 OpVT256, VR256, loadv4i64, i256mem, itins,
2871 IsCommutable, 0>, VEX_4V, VEX_L;
2872 }
2874 // These are ordered here for pattern ordering requirements with the fp versions
2876 defm PAND : PDI_binop_all<0xDB, "pand", and, v2i64, v4i64,
2877 SSE_VEC_BIT_ITINS_P, 1, NoVLX>;
2878 defm POR : PDI_binop_all<0xEB, "por", or, v2i64, v4i64,
2879 SSE_VEC_BIT_ITINS_P, 1, NoVLX>;
2880 defm PXOR : PDI_binop_all<0xEF, "pxor", xor, v2i64, v4i64,
2881 SSE_VEC_BIT_ITINS_P, 1, NoVLX>;
2882 defm PANDN : PDI_binop_all<0xDF, "pandn", X86andnp, v2i64, v4i64,
2883 SSE_VEC_BIT_ITINS_P, 0, NoVLX>;
2885 //===----------------------------------------------------------------------===//
2886 // SSE 1 & 2 - Logical Instructions
2887 //===----------------------------------------------------------------------===//
2889 // Multiclass for scalars using the X86 logical operation aliases for FP.
2890 multiclass sse12_fp_packed_scalar_logical_alias<
2891 bits<8> opc, string OpcodeStr, SDNode OpNode, OpndItins itins> {
2892 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2893 FR32, f32, f128mem, loadf32_128, SSEPackedSingle, itins, 0>,
2894 PS, VEX_4V;
2896 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2897 FR64, f64, f128mem, loadf64_128, SSEPackedDouble, itins, 0>,
2898 PD, VEX_4V;
2900 let Constraints = "$src1 = $dst" in {
2901 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
2902 f32, f128mem, memopfsf32_128, SSEPackedSingle, itins>, PS;
2904 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
2905 f64, f128mem, memopfsf64_128, SSEPackedDouble, itins>, PD;
2906 }
2907 }
2909 let isCodeGenOnly = 1 in {
2910 defm FsAND : sse12_fp_packed_scalar_logical_alias<0x54, "and", X86fand,
2911 SSE_BIT_ITINS_P>;
2912 defm FsOR : sse12_fp_packed_scalar_logical_alias<0x56, "or", X86for,
2913 SSE_BIT_ITINS_P>;
2914 defm FsXOR : sse12_fp_packed_scalar_logical_alias<0x57, "xor", X86fxor,
2915 SSE_BIT_ITINS_P>;
2917 let isCommutable = 0 in
2918 defm FsANDN : sse12_fp_packed_scalar_logical_alias<0x55, "andn", X86fandn,
2919 SSE_BIT_ITINS_P>;
2920 }
2922 // Multiclass for vectors using the X86 logical operation aliases for FP.
2923 multiclass sse12_fp_packed_vector_logical_alias<
2924 bits<8> opc, string OpcodeStr, SDNode OpNode, OpndItins itins> {
2925 let Predicates = [HasAVX, NoVLX] in {
2926 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2927 VR128, v4f32, f128mem, loadv4f32, SSEPackedSingle, itins, 0>,
2928 PS, VEX_4V;
2930 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2931 VR128, v2f64, f128mem, loadv2f64, SSEPackedDouble, itins, 0>,
2932 PD, VEX_4V;
2934 defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2935 VR256, v8f32, f256mem, loadv8f32, SSEPackedSingle, itins, 0>,
2936 PS, VEX_4V, VEX_L;
2938 defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2939 VR256, v4f64, f256mem, loadv4f64, SSEPackedDouble, itins, 0>,
2940 PD, VEX_4V, VEX_L;
2941 }
2943 let Constraints = "$src1 = $dst" in {
2944 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
2945 v4f32, f128mem, memopv4f32, SSEPackedSingle, itins>,
2946 PS;
2948 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
2949 v2f64, f128mem, memopv2f64, SSEPackedDouble, itins>,
2950 PD;
2951 }
2952 }
2954 let isCodeGenOnly = 1 in {
2955 defm FvAND : sse12_fp_packed_vector_logical_alias<0x54, "and", X86fand,
2956 SSE_BIT_ITINS_P>;
2957 defm FvOR : sse12_fp_packed_vector_logical_alias<0x56, "or", X86for,
2958 SSE_BIT_ITINS_P>;
2959 defm FvXOR : sse12_fp_packed_vector_logical_alias<0x57, "xor", X86fxor,
2960 SSE_BIT_ITINS_P>;
2962 let isCommutable = 0 in
2963 defm FvANDN : sse12_fp_packed_vector_logical_alias<0x55, "andn", X86fandn,
2964 SSE_BIT_ITINS_P>;
2965 }
2967 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
2969 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
2970 SDNode OpNode> {
2971 let Predicates = [HasAVX, NoVLX] in {
2972 defm V#NAME#PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
2973 !strconcat(OpcodeStr, "ps"), f256mem,
2974 [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
2975 [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
2976 (loadv4i64 addr:$src2)))], 0>, PS, VEX_4V, VEX_L;
2978 defm V#NAME#PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
2979 !strconcat(OpcodeStr, "pd"), f256mem,
2980 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2981 (bc_v4i64 (v4f64 VR256:$src2))))],
2982 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2983 (loadv4i64 addr:$src2)))], 0>,
2984 PD, VEX_4V, VEX_L;
2986 // In AVX there is no need to add a pattern for 128-bit logical rr ps,
2987 // because those are all promoted to v2i64 and the patterns are covered
2988 // by the int version. The pattern is needed only for SSE, because v2i64
2989 // isn't supported on SSE1, only on SSE2.
2990 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2991 !strconcat(OpcodeStr, "ps"), f128mem, [],
2992 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2993 (loadv2i64 addr:$src2)))], 0>, PS, VEX_4V;
2995 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2996 !strconcat(OpcodeStr, "pd"), f128mem,
2997 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2998 (bc_v2i64 (v2f64 VR128:$src2))))],
2999 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
3000 (loadv2i64 addr:$src2)))], 0>,
3001 PD, VEX_4V;
3002 }
3004 let Constraints = "$src1 = $dst" in {
3005 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
3006 !strconcat(OpcodeStr, "ps"), f128mem,
3007 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
3008 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
3009 (memopv2i64 addr:$src2)))]>, PS;
3011 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
3012 !strconcat(OpcodeStr, "pd"), f128mem,
3013 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
3014 (bc_v2i64 (v2f64 VR128:$src2))))],
3015 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
3016 (memopv2i64 addr:$src2)))]>, PD;
3017 }
3018 }
3020 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
3021 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
3022 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
3023 let isCommutable = 0 in
3024 defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
3026 // AVX1 requires type coercions in order to fold loads directly into logical
3027 // instructions.
3028 let Predicates = [HasAVX1Only] in {
3029 def : Pat<(bc_v8f32 (and VR256:$src1, (loadv4i64 addr:$src2))),
3030 (VANDPSYrm VR256:$src1, addr:$src2)>;
3031 def : Pat<(bc_v8f32 (or VR256:$src1, (loadv4i64 addr:$src2))),
3032 (VORPSYrm VR256:$src1, addr:$src2)>;
3033 def : Pat<(bc_v8f32 (xor VR256:$src1, (loadv4i64 addr:$src2))),
3034 (VXORPSYrm VR256:$src1, addr:$src2)>;
3035 def : Pat<(bc_v8f32 (X86andnp VR256:$src1, (loadv4i64 addr:$src2))),
3036 (VANDNPSYrm VR256:$src1, addr:$src2)>;
3037 }
3039 //===----------------------------------------------------------------------===//
3040 // SSE 1 & 2 - Arithmetic Instructions
3041 //===----------------------------------------------------------------------===//
3043 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
3044 /// vector forms.
3046 /// In addition, we also have a special variant of the scalar form here to
3047 /// represent the associated intrinsic operation. This form is unlike the
3048 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
3049 /// and leaves the top elements unmodified (therefore these cannot be commuted).
3051 /// These three forms can each be reg+reg or reg+mem.
3054 /// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
3055 /// classes below.
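///
/// Illustrative C sketch of the scalar-form semantics described above (a
/// hand-written example, not generated from this file; the helper name is
/// ours):
///
///   #include <immintrin.h>
///   // addss combines only lane 0; lanes 1-3 pass through from the first
///   // source, which is why the intrinsic form cannot be commuted:
///   __m128 add_scalar(__m128 a, __m128 b) { return _mm_add_ss(a, b); }
///   // result = { a[0] + b[0], a[1], a[2], a[3] }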
3056 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr,
3057 SDNode OpNode, SizeItins itins> {
3058 let Predicates = [HasAVX, NoVLX] in {
3059 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
3060 VR128, v4f32, f128mem, loadv4f32,
3061 SSEPackedSingle, itins.s, 0>, PS, VEX_4V;
3062 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
3063 VR128, v2f64, f128mem, loadv2f64,
3064 SSEPackedDouble, itins.d, 0>, PD, VEX_4V;
3066 defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"),
3067 OpNode, VR256, v8f32, f256mem, loadv8f32,
3068 SSEPackedSingle, itins.s, 0>, PS, VEX_4V, VEX_L;
3069 defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"),
3070 OpNode, VR256, v4f64, f256mem, loadv4f64,
3071 SSEPackedDouble, itins.d, 0>, PD, VEX_4V, VEX_L;
3072 }
3074 let Constraints = "$src1 = $dst" in {
3075 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
3076 v4f32, f128mem, memopv4f32, SSEPackedSingle,
3077 itins.s>, PS;
3078 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
3079 v2f64, f128mem, memopv2f64, SSEPackedDouble,
3080 itins.d>, PD;
3081 }
3082 }
3084 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
3085 SizeItins itins> {
3086 defm V#NAME#SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
3087 OpNode, FR32, f32mem, SSEPackedSingle, itins.s, 0>,
3088 XS, VEX_4V, VEX_LIG;
3089 defm V#NAME#SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
3090 OpNode, FR64, f64mem, SSEPackedDouble, itins.d, 0>,
3091 XD, VEX_4V, VEX_LIG;
3093 let Constraints = "$src1 = $dst" in {
3094 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
3095 OpNode, FR32, f32mem, SSEPackedSingle,
3096 itins.s>, XS;
3097 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
3098 OpNode, FR64, f64mem, SSEPackedDouble,
3099 itins.d>, XD;
3100 }
3101 }
3103 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
3104 SizeItins itins> {
3105 defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3106 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
3107 SSEPackedSingle, itins.s, 0>, XS, VEX_4V, VEX_LIG;
3108 defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3109 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
3110 SSEPackedDouble, itins.d, 0>, XD, VEX_4V, VEX_LIG;
3112 let Constraints = "$src1 = $dst" in {
3113 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3114 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
3115 SSEPackedSingle, itins.s>, XS;
3116 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3117 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
3118 SSEPackedDouble, itins.d>, XD;
3119 }
3120 }
3122 // Binary Arithmetic instructions
3123 defm ADD : basic_sse12_fp_binop_p<0x58, "add", fadd, SSE_ALU_ITINS_P>,
3124 basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>,
3125 basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S>;
3126 defm MUL : basic_sse12_fp_binop_p<0x59, "mul", fmul, SSE_MUL_ITINS_P>,
3127 basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S>,
3128 basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S>;
3129 let isCommutable = 0 in {
3130 defm SUB : basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P>,
3131 basic_sse12_fp_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>,
3132 basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S>;
3133 defm DIV : basic_sse12_fp_binop_p<0x5E, "div", fdiv, SSE_DIV_ITINS_P>,
3134 basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S>,
3135 basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S>;
3136 defm MAX : basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SSE_ALU_ITINS_P>,
3137 basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>,
3138 basic_sse12_fp_binop_s_int<0x5F, "max", SSE_ALU_ITINS_S>;
3139 defm MIN : basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SSE_ALU_ITINS_P>,
3140 basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>,
3141 basic_sse12_fp_binop_s_int<0x5D, "min", SSE_ALU_ITINS_S>;
3142 }
3144 let isCodeGenOnly = 1 in {
3145 defm MAXC: basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>,
3146 basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S>;
3147 defm MINC: basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>,
3148 basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S>;
3149 }
3151 // Patterns used to select SSE scalar fp arithmetic instructions from
3152 // either:
3154 // (1) a scalar fp operation followed by a blend
3156 // The effect is that the backend no longer emits unnecessary vector
3157 // insert instructions immediately after SSE scalar fp instructions
3158 // like addss or mulss.
3160 // For example, given the following code:
3161 // __m128 foo(__m128 A, __m128 B) {
3162 // A[0] += B[0];
3163 // return A;
3164 // }
3166 // Previously we generated:
3167 // addss %xmm0, %xmm1
3168 // movss %xmm1, %xmm0
3170 // We now generate:
3171 // addss %xmm1, %xmm0
3173 // (2) a vector packed single/double fp operation followed by a vector insert
3175 // The effect is that the backend converts the packed fp instruction
3176 // followed by a vector insert into a single SSE scalar fp instruction.
3178 // For example, given the following code:
3179 // __m128 foo(__m128 A, __m128 B) {
3180 // __m128 C = A + B;
3181 // return (__m128) {c[0], a[1], a[2], a[3]};
3182 // }
3184 // Previously we generated:
3185 // addps %xmm0, %xmm1
3186 // movss %xmm1, %xmm0
3188 // We now generate:
3189 // addss %xmm1, %xmm0
3191 // TODO: Some canonicalization in lowering would simplify the number of
3192 // patterns we have to try to match.
3193 multiclass scalar_math_f32_patterns<SDNode Op, string OpcPrefix> {
3194 let Predicates = [UseSSE1] in {
3195 // extracted scalar math op with insert via movss
3196 def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
3197 (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
3198 FR32:$src))))),
3199 (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst,
3200 (COPY_TO_REGCLASS FR32:$src, VR128))>;
3202 // vector math op with insert via movss
3203 def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
3204 (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
3205 (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
3206 }
3208 // With SSE 4.1, blendi is preferred to movss, so match that too.
3209 let Predicates = [UseSSE41] in {
3210 // extracted scalar math op with insert via blend
3211 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
3212 (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
3213 FR32:$src))), (i8 1))),
3214 (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst,
3215 (COPY_TO_REGCLASS FR32:$src, VR128))>;
3217 // vector math op with insert via blend
3218 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
3219 (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
3220 (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
3221 }
3224 // Repeat everything for AVX, except for the movss + scalar combo,
3225 // which shouldn't occur with AVX codegen.
3226 let Predicates = [HasAVX] in {
3227 // extracted scalar math op with insert via blend
3228 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
3229 (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
3230 FR32:$src))), (i8 1))),
3231 (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst,
3232 (COPY_TO_REGCLASS FR32:$src, VR128))>;
3234 // vector math op with insert via movss
3235 def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
3236 (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
3237 (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
3239 // vector math op with insert via blend
3240 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
3241 (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
3242 (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
3246 defm : scalar_math_f32_patterns<fadd, "ADD">;
3247 defm : scalar_math_f32_patterns<fsub, "SUB">;
3248 defm : scalar_math_f32_patterns<fmul, "MUL">;
3249 defm : scalar_math_f32_patterns<fdiv, "DIV">;
3251 multiclass scalar_math_f64_patterns<SDNode Op, string OpcPrefix> {
3252 let Predicates = [UseSSE2] in {
3253 // extracted scalar math op with insert via movsd
3254 def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
3255 (Op (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
3256 FR64:$src))))),
3257 (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst,
3258 (COPY_TO_REGCLASS FR64:$src, VR128))>;
3260 // vector math op with insert via movsd
3261 def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
3262 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
3263 (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3264 }
3266 // With SSE 4.1, blendi is preferred to movsd, so match those too.
3267 let Predicates = [UseSSE41] in {
3268 // extracted scalar math op with insert via blend
3269 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
3270 (Op (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
3271 FR64:$src))), (i8 1))),
3272 (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst,
3273 (COPY_TO_REGCLASS FR64:$src, VR128))>;
3275 // vector math op with insert via blend
3276 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
3277 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
3278 (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3279 }
3281 // Repeat everything for AVX.
3282 let Predicates = [HasAVX] in {
3283 // extracted scalar math op with insert via movsd
3284 def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
3285 (Op (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
3286 FR64:$src))))),
3287 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst,
3288 (COPY_TO_REGCLASS FR64:$src, VR128))>;
3290 // extracted scalar math op with insert via blend
3291 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
3292 (Op (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
3293 FR64:$src))), (i8 1))),
3294 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst,
3295 (COPY_TO_REGCLASS FR64:$src, VR128))>;
3297 // vector math op with insert via movsd
3298 def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
3299 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
3300 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3302 // vector math op with insert via blend
3303 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
3304 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
3305 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3309 defm : scalar_math_f64_patterns<fadd, "ADD">;
3310 defm : scalar_math_f64_patterns<fsub, "SUB">;
3311 defm : scalar_math_f64_patterns<fmul, "MUL">;
3312 defm : scalar_math_f64_patterns<fdiv, "DIV">;
3314 /// Unop Arithmetic
3315 ///
3316 /// In addition, we also have a special variant of the scalar form here to
3317 /// represent the associated intrinsic operation. This form is unlike the
3318 /// plain scalar form, in that it takes an entire vector (instead of a
3319 /// scalar) and leaves the top elements undefined.
3321 /// And, we have a special variant form for a full-vector intrinsic form.
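///
/// Illustrative C sketch of the scalar unop semantics (a hand-written
/// example, not generated from this file; the helper name is ours):
///
///   #include <immintrin.h>
///   // sqrtss computes sqrt of lane 0 only; the remaining lanes pass
///   // through from the source vector:
///   __m128 sqrt_scalar(__m128 a) { return _mm_sqrt_ss(a); }
///   // result = { sqrt(a[0]), a[1], a[2], a[3] }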
3323 let Sched = WriteFSqrt in {
3324 def SSE_SQRTPS : OpndItins<
3325 IIC_SSE_SQRTPS_RR, IIC_SSE_SQRTPS_RM
3326 >;
3328 def SSE_SQRTSS : OpndItins<
3329 IIC_SSE_SQRTSS_RR, IIC_SSE_SQRTSS_RM
3330 >;
3332 def SSE_SQRTPD : OpndItins<
3333 IIC_SSE_SQRTPD_RR, IIC_SSE_SQRTPD_RM
3334 >;
3336 def SSE_SQRTSD : OpndItins<
3337 IIC_SSE_SQRTSD_RR, IIC_SSE_SQRTSD_RM
3338 >;
3339 }
3341 let Sched = WriteFRsqrt in {
3342 def SSE_RSQRTPS : OpndItins<
3343 IIC_SSE_RSQRTPS_RR, IIC_SSE_RSQRTPS_RM
3344 >;
3346 def SSE_RSQRTSS : OpndItins<
3347 IIC_SSE_RSQRTSS_RR, IIC_SSE_RSQRTSS_RM
3348 >;
3349 }
3351 let Sched = WriteFRcp in {
3352 def SSE_RCPP : OpndItins<
3353 IIC_SSE_RCPP_RR, IIC_SSE_RCPP_RM
3354 >;
3356 def SSE_RCPS : OpndItins<
3357 IIC_SSE_RCPS_RR, IIC_SSE_RCPS_RM
3358 >;
3359 }
3361 /// sse_fp_unop_s - SSE1 unops in scalar form
3362 /// For the non-AVX defs, we need $src1 to be tied to $dst because
3363 /// the HW instructions are 2 operand / destructive.
3364 multiclass sse_fp_unop_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
3365 ValueType vt, ValueType ScalarVT,
3366 X86MemOperand x86memop, Operand vec_memop,
3367 ComplexPattern mem_cpat, Intrinsic Intr,
3368 SDNode OpNode, Domain d, OpndItins itins,
3369 Predicate target, string Suffix> {
3370 let hasSideEffects = 0 in {
3371 def r : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1),
3372 !strconcat(OpcodeStr, "\t{$src1, $dst|$dst, $src1}"),
3373 [(set RC:$dst, (OpNode RC:$src1))], itins.rr, d>, Sched<[itins.Sched]>,
3374 Requires<[target]>;
3376 def m : I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src1),
3377 !strconcat(OpcodeStr, "\t{$src1, $dst|$dst, $src1}"),
3378 [(set RC:$dst, (OpNode (load addr:$src1)))], itins.rm, d>,
3379 Sched<[itins.Sched.Folded, ReadAfterLd]>,
3380 Requires<[target, OptForSize]>;
3382 let isCodeGenOnly = 1, Constraints = "$src1 = $dst" in {
3383 def r_Int : I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3384 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3385 []>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
3387 def m_Int : I<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, vec_memop:$src2),
3388 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3389 []>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
3390 }
3391 }
3393 let Predicates = [target] in {
3394 def : Pat<(vt (OpNode mem_cpat:$src)),
3395 (vt (COPY_TO_REGCLASS (vt (!cast<Instruction>(NAME#Suffix##m_Int)
3396 (vt (IMPLICIT_DEF)), mem_cpat:$src)), RC))>;
3397 // These are unary operations, but they are modeled as having 2 source operands
3398 // because the high elements of the destination are unchanged in SSE.
3399 def : Pat<(Intr VR128:$src),
3400 (!cast<Instruction>(NAME#Suffix##r_Int) VR128:$src, VR128:$src)>;
3401 def : Pat<(Intr (load addr:$src)),
3402 (vt (COPY_TO_REGCLASS(!cast<Instruction>(NAME#Suffix##m)
3403 addr:$src), VR128))>;
3404 def : Pat<(Intr mem_cpat:$src),
3405 (!cast<Instruction>(NAME#Suffix##m_Int)
3406 (vt (IMPLICIT_DEF)), mem_cpat:$src)>;
3407 }
3408 }
3410 multiclass avx_fp_unop_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
3411 ValueType vt, ValueType ScalarVT,
3412 X86MemOperand x86memop, Operand vec_memop,
3413 ComplexPattern mem_cpat,
3414 Intrinsic Intr, SDNode OpNode, Domain d,
3415 OpndItins itins, string Suffix> {
3416 let hasSideEffects = 0 in {
3417 def r : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3418 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3419 [], itins.rr, d>, Sched<[itins.Sched]>;
3421 def m : I<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3422 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3423 [], itins.rm, d>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
3424 let isCodeGenOnly = 1 in {
3425 def r_Int : I<opc, MRMSrcReg, (outs VR128:$dst),
3426 (ins VR128:$src1, VR128:$src2),
3427 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3428 []>, Sched<[itins.Sched.Folded]>;
3430 def m_Int : I<opc, MRMSrcMem, (outs VR128:$dst),
3431 (ins VR128:$src1, vec_memop:$src2),
3432 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3433 []>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
3434 }
3435 }
3437 let Predicates = [UseAVX] in {
3438 def : Pat<(OpNode RC:$src), (!cast<Instruction>("V"#NAME#Suffix##r)
3439 (ScalarVT (IMPLICIT_DEF)), RC:$src)>;
3441 def : Pat<(vt (OpNode mem_cpat:$src)),
3442 (!cast<Instruction>("V"#NAME#Suffix##m_Int) (vt (IMPLICIT_DEF)),
3446 let Predicates = [HasAVX] in {
3447 def : Pat<(Intr VR128:$src),
3448 (!cast<Instruction>("V"#NAME#Suffix##r_Int) (vt (IMPLICIT_DEF)),
3451 def : Pat<(Intr mem_cpat:$src),
3452 (!cast<Instruction>("V"#NAME#Suffix##m_Int)
3453 (vt (IMPLICIT_DEF)), mem_cpat:$src)>;
3454 }
3455 let Predicates = [UseAVX, OptForSize] in
3456 def : Pat<(ScalarVT (OpNode (load addr:$src))),
3457 (!cast<Instruction>("V"#NAME#Suffix##m) (ScalarVT (IMPLICIT_DEF)),
3461 /// sse1_fp_unop_p - SSE1 unops in packed form.
3462 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
3463 OpndItins itins> {
3464 let Predicates = [HasAVX] in {
3465 def V#NAME#PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3466 !strconcat("v", OpcodeStr,
3467 "ps\t{$src, $dst|$dst, $src}"),
3468 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))],
3469 itins.rr>, VEX, Sched<[itins.Sched]>;
3470 def V#NAME#PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3471 !strconcat("v", OpcodeStr,
3472 "ps\t{$src, $dst|$dst, $src}"),
3473 [(set VR128:$dst, (OpNode (loadv4f32 addr:$src)))],
3474 itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
3475 def V#NAME#PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3476 !strconcat("v", OpcodeStr,
3477 "ps\t{$src, $dst|$dst, $src}"),
3478 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))],
3479 itins.rr>, VEX, VEX_L, Sched<[itins.Sched]>;
3480 def V#NAME#PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3481 !strconcat("v", OpcodeStr,
3482 "ps\t{$src, $dst|$dst, $src}"),
3483 [(set VR256:$dst, (OpNode (loadv8f32 addr:$src)))],
3484 itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
3485 }
3487 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3488 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3489 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))], itins.rr>,
3490 Sched<[itins.Sched]>;
3491 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3492 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3493 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))], itins.rm>,
3494 Sched<[itins.Sched.Folded]>;
3495 }
3497 /// sse2_fp_unop_p - SSE2 unops in vector forms.
3498 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
3499 SDNode OpNode, OpndItins itins> {
3500 let Predicates = [HasAVX] in {
3501 def V#NAME#PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3502 !strconcat("v", OpcodeStr,
3503 "pd\t{$src, $dst|$dst, $src}"),
3504 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))],
3505 itins.rr>, VEX, Sched<[itins.Sched]>;
3506 def V#NAME#PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3507 !strconcat("v", OpcodeStr,
3508 "pd\t{$src, $dst|$dst, $src}"),
3509 [(set VR128:$dst, (OpNode (loadv2f64 addr:$src)))],
3510 itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
3511 def V#NAME#PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3512 !strconcat("v", OpcodeStr,
3513 "pd\t{$src, $dst|$dst, $src}"),
3514 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))],
3515 itins.rr>, VEX, VEX_L, Sched<[itins.Sched]>;
3516 def V#NAME#PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3517 !strconcat("v", OpcodeStr,
3518 "pd\t{$src, $dst|$dst, $src}"),
3519 [(set VR256:$dst, (OpNode (loadv4f64 addr:$src)))],
3520 itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
3521 }
3523 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3524 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3525 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))], itins.rr>,
3526 Sched<[itins.Sched]>;
3527 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3528 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3529 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))], itins.rm>,
3530 Sched<[itins.Sched.Folded]>;
3531 }
3533 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
3534 OpndItins itins> {
3535 defm SS : sse_fp_unop_s<opc, OpcodeStr##ss, FR32, v4f32, f32, f32mem,
3536 ssmem, sse_load_f32,
3537 !cast<Intrinsic>("int_x86_sse_"##OpcodeStr##_ss), OpNode,
3538 SSEPackedSingle, itins, UseSSE1, "SS">, XS;
3539 defm V#NAME#SS : avx_fp_unop_s<opc, "v"#OpcodeStr##ss, FR32, v4f32, f32,
3540 f32mem, ssmem, sse_load_f32,
3541 !cast<Intrinsic>("int_x86_sse_"##OpcodeStr##_ss), OpNode,
3542 SSEPackedSingle, itins, "SS">, XS, VEX_4V, VEX_LIG;
3545 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
3546 OpndItins itins> {
3547 defm SD : sse_fp_unop_s<opc, OpcodeStr##sd, FR64, v2f64, f64, f64mem,
3548 sdmem, sse_load_f64,
3549 !cast<Intrinsic>("int_x86_sse2_"##OpcodeStr##_sd),
3550 OpNode, SSEPackedDouble, itins, UseSSE2, "SD">, XD;
3551 defm V#NAME#SD : avx_fp_unop_s<opc, "v"#OpcodeStr##sd, FR64, v2f64, f64,
3552 f64mem, sdmem, sse_load_f64,
3553 !cast<Intrinsic>("int_x86_sse2_"##OpcodeStr##_sd),
3554 OpNode, SSEPackedDouble, itins, "SD">,
3555 XD, VEX_4V, VEX_LIG;
3556 }
3558 // Square root.
3559 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, SSE_SQRTSS>,
3560 sse1_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPS>,
3561 sse2_fp_unop_s<0x51, "sqrt", fsqrt, SSE_SQRTSD>,
3562 sse2_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPD>;
3564 // Reciprocal approximations. Note that these typically require refinement
3565 // in order to obtain suitable precision.
3566 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, SSE_RSQRTSS>,
3567 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SSE_RSQRTPS>;
3568 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, SSE_RCPS>,
3569 sse1_fp_unop_p<0x53, "rcp", X86frcp, SSE_RCPP>;
3571 // There is no f64 version of the reciprocal approximation instructions.
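//
// Illustrative C sketch of the refinement mentioned above: one Newton-Raphson
// step applied to the rsqrtps estimate (a hand-written example, not generated
// from this file; the helper name is ours):
//
//   #include <immintrin.h>
//   __m128 rsqrt_refined(__m128 a) {
//     __m128 x = _mm_rsqrt_ps(a);              // ~12-bit estimate
//     __m128 half_a_xx = _mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(0.5f)),
//                                   _mm_mul_ps(x, x));
//     // x' = x * (1.5 - 0.5*a*x*x) roughly doubles the precision
//     return _mm_mul_ps(x, _mm_sub_ps(_mm_set1_ps(1.5f), half_a_xx));
//   }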
3573 // TODO: We should add *scalar* op patterns for these just like we have for
3574 // the binops above. If the binop and unop patterns could all be unified
3575 // that would be even better.
3577 multiclass scalar_unary_math_patterns<Intrinsic Intr, string OpcPrefix,
3578 SDNode Move, ValueType VT,
3579 Predicate BasePredicate> {
3580 let Predicates = [BasePredicate] in {
3581 def : Pat<(VT (Move VT:$dst, (Intr VT:$src))),
3582 (!cast<I>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
3583 }
3585 // With SSE 4.1, blendi is preferred to movs*, so match that too.
3586 let Predicates = [UseSSE41] in {
3587 def : Pat<(VT (X86Blendi VT:$dst, (Intr VT:$src), (i8 1))),
3588 (!cast<I>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
3589 }
3591 // Repeat for AVX versions of the instructions.
3592 let Predicates = [HasAVX] in {
3593 def : Pat<(VT (Move VT:$dst, (Intr VT:$src))),
3594 (!cast<I>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
3596 def : Pat<(VT (X86Blendi VT:$dst, (Intr VT:$src), (i8 1))),
3597 (!cast<I>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
3601 defm : scalar_unary_math_patterns<int_x86_sse_rcp_ss, "RCPSS", X86Movss,
3602 v4f32, UseSSE1>;
3603 defm : scalar_unary_math_patterns<int_x86_sse_rsqrt_ss, "RSQRTSS", X86Movss,
3604 v4f32, UseSSE1>;
3605 defm : scalar_unary_math_patterns<int_x86_sse_sqrt_ss, "SQRTSS", X86Movss,
3606 v4f32, UseSSE1>;
3607 defm : scalar_unary_math_patterns<int_x86_sse2_sqrt_sd, "SQRTSD", X86Movsd,
3608 v2f64, UseSSE2>;
3611 //===----------------------------------------------------------------------===//
3612 // SSE 1 & 2 - Non-temporal stores
3613 //===----------------------------------------------------------------------===//
3615 let AddedComplexity = 400 in { // Prefer non-temporal versions
3616 let SchedRW = [WriteStore] in {
3617 let Predicates = [HasAVX, NoVLX] in {
3618 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
3619 (ins f128mem:$dst, VR128:$src),
3620 "movntps\t{$src, $dst|$dst, $src}",
3621 [(alignednontemporalstore (v4f32 VR128:$src),
3622 addr:$dst)],
3623 IIC_SSE_MOVNT>, VEX;
3624 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
3625 (ins f128mem:$dst, VR128:$src),
3626 "movntpd\t{$src, $dst|$dst, $src}",
3627 [(alignednontemporalstore (v2f64 VR128:$src),
3628 addr:$dst)],
3629 IIC_SSE_MOVNT>, VEX;
3631 let ExeDomain = SSEPackedInt in
3632 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
3633 (ins f128mem:$dst, VR128:$src),
3634 "movntdq\t{$src, $dst|$dst, $src}",
3635 [(alignednontemporalstore (v2i64 VR128:$src),
3636 addr:$dst)],
3637 IIC_SSE_MOVNT>, VEX;
3639 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
3640 (ins f256mem:$dst, VR256:$src),
3641 "movntps\t{$src, $dst|$dst, $src}",
3642 [(alignednontemporalstore (v8f32 VR256:$src),
3643 addr:$dst)],
3644 IIC_SSE_MOVNT>, VEX, VEX_L;
3645 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
3646 (ins f256mem:$dst, VR256:$src),
3647 "movntpd\t{$src, $dst|$dst, $src}",
3648 [(alignednontemporalstore (v4f64 VR256:$src),
3649 addr:$dst)],
3650 IIC_SSE_MOVNT>, VEX, VEX_L;
3651 let ExeDomain = SSEPackedInt in
3652 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
3653 (ins f256mem:$dst, VR256:$src),
3654 "movntdq\t{$src, $dst|$dst, $src}",
3655 [(alignednontemporalstore (v4i64 VR256:$src),
3656 addr:$dst)],
3657 IIC_SSE_MOVNT>, VEX, VEX_L;
3658 }
3660 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3661 "movntps\t{$src, $dst|$dst, $src}",
3662 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)],
3663 IIC_SSE_MOVNT>;
3664 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3665 "movntpd\t{$src, $dst|$dst, $src}",
3666 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)],
3667 IIC_SSE_MOVNT>;
3669 let ExeDomain = SSEPackedInt in
3670 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3671 "movntdq\t{$src, $dst|$dst, $src}",
3672 [(alignednontemporalstore (v2i64 VR128:$src), addr:$dst)],
3673 IIC_SSE_MOVNT>;
3675 // There is no AVX form for instructions below this point
3676 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
3677 "movnti{l}\t{$src, $dst|$dst, $src}",
3678 [(nontemporalstore (i32 GR32:$src), addr:$dst)],
3679 IIC_SSE_MOVNT>,
3680 PS, Requires<[HasSSE2]>;
3681 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
3682 "movnti{q}\t{$src, $dst|$dst, $src}",
3683 [(nontemporalstore (i64 GR64:$src), addr:$dst)],
3684 IIC_SSE_MOVNT>,
3685 PS, Requires<[HasSSE2]>;
3686 } // SchedRW = [WriteStore]
3688 let Predicates = [HasAVX2, NoVLX] in {
3689 def : Pat<(alignednontemporalstore (v8i32 VR256:$src), addr:$dst),
3690 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
3691 def : Pat<(alignednontemporalstore (v16i16 VR256:$src), addr:$dst),
3692 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
3693 def : Pat<(alignednontemporalstore (v32i8 VR256:$src), addr:$dst),
3694 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
3695 }
3697 let Predicates = [HasAVX, NoVLX] in {
3698 def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
3699 (VMOVNTDQmr addr:$dst, VR128:$src)>;
3700 def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
3701 (VMOVNTDQmr addr:$dst, VR128:$src)>;
3702 def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
3703 (VMOVNTDQmr addr:$dst, VR128:$src)>;
3704 }
3706 def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
3707 (MOVNTDQmr addr:$dst, VR128:$src)>;
3708 def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
3709 (MOVNTDQmr addr:$dst, VR128:$src)>;
3710 def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
3711 (MOVNTDQmr addr:$dst, VR128:$src)>;
3713 } // AddedComplexity
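//
// Illustrative C usage of the non-temporal stores above (a sketch, not
// generated from this file; assumes dst is 16-byte aligned and n is a
// multiple of 4):
//
//   #include <immintrin.h>
//   // movntps bypasses the cache, which helps for large write-only buffers;
//   // the sfence makes the streamed stores visible before later accesses.
//   void fill(float *dst, __m128 v, int n) {
//     for (int i = 0; i < n; i += 4)
//       _mm_stream_ps(dst + i, v);
//     _mm_sfence();
//   }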
3715 //===----------------------------------------------------------------------===//
3716 // SSE 1 & 2 - Prefetch and memory fence
3717 //===----------------------------------------------------------------------===//
3719 // Prefetch intrinsic.
3720 let Predicates = [HasSSE1], SchedRW = [WriteLoad] in {
3721 def PREFETCHT0 : I<0x18, MRM1m, (outs), (ins i8mem:$src),
3722 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))],
3723 IIC_SSE_PREFETCH>, TB;
3724 def PREFETCHT1 : I<0x18, MRM2m, (outs), (ins i8mem:$src),
3725 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))],
3726 IIC_SSE_PREFETCH>, TB;
3727 def PREFETCHT2 : I<0x18, MRM3m, (outs), (ins i8mem:$src),
3728 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))],
3729 IIC_SSE_PREFETCH>, TB;
3730 def PREFETCHNTA : I<0x18, MRM0m, (outs), (ins i8mem:$src),
3731 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))],
3732 IIC_SSE_PREFETCH>, TB;
3733 }
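//
// Illustrative C usage (a sketch, not generated from this file): the locality
// argument of __builtin_prefetch selects among the hints above, with
// 3 -> prefetcht0, 2 -> prefetcht1, 1 -> prefetcht2, and 0 -> prefetchnta.
//
//   long sum(const unsigned char *p, long n) {
//     long s = 0;
//     for (long i = 0; i < n; ++i) {
//       if ((i & 63) == 0)
//         __builtin_prefetch(p + i + 256, 0 /* read */, 3 /* prefetcht0 */);
//       s += p[i];
//     }
//     return s;
//   }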
3735 // FIXME: How should flush instruction be modeled?
3736 let SchedRW = [WriteLoad] in {
3737 // Flush cache
3738 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3739 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)],
3740 IIC_SSE_PREFETCH>, PS, Requires<[HasSSE2]>;
3741 }
3743 let SchedRW = [WriteNop] in {
3744 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3745 // was introduced with SSE2, it's backward compatible.
3746 def PAUSE : I<0x90, RawFrm, (outs), (ins),
3747 "pause", [(int_x86_sse2_pause)], IIC_SSE_PAUSE>,
3748 OBXS, Requires<[HasSSE2]>;
3749 }
3751 let SchedRW = [WriteFence] in {
3752 // Load, store, and memory fence
3753 def SFENCE : I<0xAE, MRM_F8, (outs), (ins),
3754 "sfence", [(int_x86_sse_sfence)], IIC_SSE_SFENCE>,
3755 PS, Requires<[HasSSE1]>;
3756 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3757 "lfence", [(int_x86_sse2_lfence)], IIC_SSE_LFENCE>,
3758 TB, Requires<[HasSSE2]>;
3759 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3760 "mfence", [(int_x86_sse2_mfence)], IIC_SSE_MFENCE>,
3761 TB, Requires<[HasSSE2]>;
3762 }
3764 def : Pat<(X86SFence), (SFENCE)>;
3765 def : Pat<(X86LFence), (LFENCE)>;
3766 def : Pat<(X86MFence), (MFENCE)>;
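//
// Illustrative C sketch of typical pause/fence usage (hand-written, not
// generated from this file; helper names are ours):
//
//   #include <immintrin.h>
//   volatile int flag;
//   void publish(void) {
//     /* ... fill shared data ... */
//     _mm_mfence();       // full barrier before making the data visible
//     flag = 1;
//   }
//   void spin_wait(void) {
//     while (!flag)
//       _mm_pause();      // encoded rep;nop - eases spinning on SMT cores
//   }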
3768 //===----------------------------------------------------------------------===//
3769 // SSE 1 & 2 - Load/Store XCSR register
3770 //===----------------------------------------------------------------------===//
3772 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
3773 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
3774 IIC_SSE_LDMXCSR>, VEX, Sched<[WriteLoad]>;
3775 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3776 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
3777 IIC_SSE_STMXCSR>, VEX, Sched<[WriteStore]>;
3779 let Predicates = [UseSSE1] in {
3780 def LDMXCSR : I<0xAE, MRM2m, (outs), (ins i32mem:$src),
3781 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
3782 IIC_SSE_LDMXCSR>, TB, Sched<[WriteLoad]>;
3783 def STMXCSR : I<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3784 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
3785 IIC_SSE_STMXCSR>, TB, Sched<[WriteStore]>;
3786 }
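//
// Illustrative C usage of the MXCSR load/store above (a sketch, not generated
// from this file; the helper name is ours):
//
//   #include <immintrin.h>
//   // stmxcsr/ldmxcsr read and write the SSE control/status register, e.g.
//   // to enable flush-to-zero for denormal-heavy workloads:
//   void enable_ftz(void) {
//     unsigned csr = _mm_getcsr();   // compiles to stmxcsr
//     _mm_setcsr(csr | 0x8000);      // set the FTZ bit; compiles to ldmxcsr
//   }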
3788 //===---------------------------------------------------------------------===//
3789 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
3790 //===---------------------------------------------------------------------===//
3792 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3794 let hasSideEffects = 0, SchedRW = [WriteMove] in {
3795 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3796 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
3798 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3799 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
3801 def VMOVDQUrr : VSSI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3802 "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
3804 def VMOVDQUYrr : VSSI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3805 "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
3810 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
3811 SchedRW = [WriteMove] in {
3812 def VMOVDQArr_REV : VPDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3813 "movdqa\t{$src, $dst|$dst, $src}", [],
3816 def VMOVDQAYrr_REV : VPDI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
3817 "movdqa\t{$src, $dst|$dst, $src}", [],
3818 IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
3819 def VMOVDQUrr_REV : VSSI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3820 "movdqu\t{$src, $dst|$dst, $src}", [],
3823 def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
3824 "movdqu\t{$src, $dst|$dst, $src}", [],
3825 IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
3826 }
3828 let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
3829 hasSideEffects = 0, SchedRW = [WriteLoad] in {
3830 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3831 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
3833 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3834 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
3836 let Predicates = [HasAVX] in {
3837 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3838 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
3840 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3841 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
3846 let mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
3847 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
3848 (ins i128mem:$dst, VR128:$src),
3849 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
3851 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
3852 (ins i256mem:$dst, VR256:$src),
3853 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
3855 let Predicates = [HasAVX] in {
3856 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3857 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
3859 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
3860 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
3865 let SchedRW = [WriteMove] in {
3866 let hasSideEffects = 0 in
3867 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3868 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>;
3870 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3871 "movdqu\t{$src, $dst|$dst, $src}",
3872 [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;
3874 // For Disassembler
3875 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
3876 def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3877 "movdqa\t{$src, $dst|$dst, $src}", [],
3880 def MOVDQUrr_REV : I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3881 "movdqu\t{$src, $dst|$dst, $src}",
3882 [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;
3883 }
3884 }
3886 let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
3887 hasSideEffects = 0, SchedRW = [WriteLoad] in {
3888 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3889 "movdqa\t{$src, $dst|$dst, $src}",
3890 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/],
3891 IIC_SSE_MOVA_P_RM>;
3892 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3893 "movdqu\t{$src, $dst|$dst, $src}",
3894 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/],
3895 IIC_SSE_MOVU_P_RM>,
3896 XS, Requires<[UseSSE2]>;
3897 }
3899 let mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
3900 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3901 "movdqa\t{$src, $dst|$dst, $src}",
3902 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/],
3903 IIC_SSE_MOVA_P_MR>;
3904 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3905 "movdqu\t{$src, $dst|$dst, $src}",
3906 [/*(store (v2i64 VR128:$src), addr:$dst)*/],
3907 IIC_SSE_MOVU_P_MR>,
3908 XS, Requires<[UseSSE2]>;
3909 }
3911 } // ExeDomain = SSEPackedInt
3913 let Predicates = [HasAVX] in {
3914 def : Pat<(int_x86_sse2_storeu_dq addr:$dst, VR128:$src),
3915 (VMOVDQUmr addr:$dst, VR128:$src)>;
3916 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
3917 (VMOVDQUYmr addr:$dst, VR256:$src)>;
3918 }
3919 let Predicates = [UseSSE2] in
3920 def : Pat<(int_x86_sse2_storeu_dq addr:$dst, VR128:$src),
3921 (MOVDQUmr addr:$dst, VR128:$src)>;
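//
// Illustrative C contrast of the aligned and unaligned forms above (a sketch,
// not generated from this file; helper names are ours):
//
//   #include <emmintrin.h>
//   __m128i load_aligned(const void *p) {    // movdqa: p must be 16-byte
//     return _mm_load_si128((const __m128i *)p);    // aligned, else it faults
//   }
//   __m128i load_unaligned(const void *p) {  // movdqu: no alignment
//     return _mm_loadu_si128((const __m128i *)p);   // requirement
//   }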
3923 //===---------------------------------------------------------------------===//
3924 // SSE2 - Packed Integer Arithmetic Instructions
3925 //===---------------------------------------------------------------------===//
3927 let Sched = WriteVecIMul in
3928 def SSE_PMADD : OpndItins<
3929 IIC_SSE_PMADD, IIC_SSE_PMADD
3930 >;
3932 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3934 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
3935 RegisterClass RC, PatFrag memop_frag,
3936 X86MemOperand x86memop,
3937 OpndItins itins,
3938 bit IsCommutable = 0,
3939 bit Is2Addr = 1> {
3940 let isCommutable = IsCommutable in
3941 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
3942 (ins RC:$src1, RC:$src2),
3943 !if(Is2Addr,
3944 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3945 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3946 [(set RC:$dst, (IntId RC:$src1, RC:$src2))], itins.rr>,
3947 Sched<[itins.Sched]>;
3948 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
3949 (ins RC:$src1, x86memop:$src2),
3950 !if(Is2Addr,
3951 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3952 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3953 [(set RC:$dst, (IntId RC:$src1, (bitconvert (memop_frag addr:$src2))))],
3954 itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
3955 }
3957 multiclass PDI_binop_all_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
3958 Intrinsic IntId256, OpndItins itins,
3959 bit IsCommutable = 0> {
3960 let Predicates = [HasAVX] in
3961 defm V#NAME : PDI_binop_rm_int<opc, !strconcat("v", OpcodeStr), IntId128,
3962 VR128, loadv2i64, i128mem, itins,
3963 IsCommutable, 0>, VEX_4V;
3965 let Constraints = "$src1 = $dst" in
3966 defm NAME : PDI_binop_rm_int<opc, OpcodeStr, IntId128, VR128, memopv2i64,
3967 i128mem, itins, IsCommutable, 1>;
3969 let Predicates = [HasAVX2] in
3970 defm V#NAME#Y : PDI_binop_rm_int<opc, !strconcat("v", OpcodeStr), IntId256,
3971 VR256, loadv4i64, i256mem, itins,
3972 IsCommutable, 0>, VEX_4V, VEX_L;
3973 }
3975 multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
3976 string OpcodeStr, SDNode OpNode,
3977 SDNode OpNode2, RegisterClass RC,
3978 ValueType DstVT, ValueType SrcVT, PatFrag bc_frag,
3979 PatFrag ld_frag, ShiftOpndItins itins,
3980 bit Is2Addr = 1> {
3981 // src2 is always 128-bit
3982 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
3983 (ins RC:$src1, VR128:$src2),
3984 !if(Is2Addr,
3985 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3986 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3987 [(set RC:$dst, (DstVT (OpNode RC:$src1, (SrcVT VR128:$src2))))],
3988 itins.rr>, Sched<[WriteVecShift]>;
3989 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
3990 (ins RC:$src1, i128mem:$src2),
3991 !if(Is2Addr,
3992 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3993 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3994 [(set RC:$dst, (DstVT (OpNode RC:$src1,
3995 (bc_frag (ld_frag addr:$src2)))))], itins.rm>,
3996 Sched<[WriteVecShiftLd, ReadAfterLd]>;
3997 def ri : PDIi8<opc2, ImmForm, (outs RC:$dst),
3998 (ins RC:$src1, u8imm:$src2),
3999 !if(Is2Addr,
4000 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4001 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4002 [(set RC:$dst, (DstVT (OpNode2 RC:$src1, (i8 imm:$src2))))], itins.ri>,
4003 Sched<[WriteVecShift]>;
4004 }
4006 /// PDI_binop_rm2 - Simple SSE2 binary operator with different src and dst types
4007 multiclass PDI_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
4008 ValueType DstVT, ValueType SrcVT, RegisterClass RC,
4009 PatFrag memop_frag, X86MemOperand x86memop,
4010 OpndItins itins,
4011 bit IsCommutable = 0, bit Is2Addr = 1> {
4012 let isCommutable = IsCommutable in
4013 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
4014 (ins RC:$src1, RC:$src2),
4015 !if(Is2Addr,
4016 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4017 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4018 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>,
4019 Sched<[itins.Sched]>;
4020 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
4021 (ins RC:$src1, x86memop:$src2),
4022 !if(Is2Addr,
4023 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4024 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4025 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
4026 (bitconvert (memop_frag addr:$src2)))))]>,
4027 Sched<[itins.Sched.Folded, ReadAfterLd]>;
4028 }
4029 } // ExeDomain = SSEPackedInt
4031 defm PADDB : PDI_binop_all<0xFC, "paddb", add, v16i8, v32i8,
4032 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
4033 defm PADDW : PDI_binop_all<0xFD, "paddw", add, v8i16, v16i16,
4034 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
4035 defm PADDD : PDI_binop_all<0xFE, "paddd", add, v4i32, v8i32,
4036 SSE_INTALU_ITINS_P, 1, NoVLX>;
4037 defm PADDQ : PDI_binop_all<0xD4, "paddq", add, v2i64, v4i64,
4038 SSE_INTALUQ_ITINS_P, 1, NoVLX>;
4039 defm PMULLW : PDI_binop_all<0xD5, "pmullw", mul, v8i16, v16i16,
4040 SSE_INTMUL_ITINS_P, 1, NoVLX_Or_NoBWI>;
4041 defm PMULHUW : PDI_binop_all<0xE4, "pmulhuw", mulhu, v8i16, v16i16,
4042 SSE_INTMUL_ITINS_P, 1, NoVLX_Or_NoBWI>;
4043 defm PMULHW : PDI_binop_all<0xE5, "pmulhw", mulhs, v8i16, v16i16,
4044 SSE_INTMUL_ITINS_P, 1, NoVLX_Or_NoBWI>;
4045 defm PSUBB : PDI_binop_all<0xF8, "psubb", sub, v16i8, v32i8,
4046 SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
4047 defm PSUBW : PDI_binop_all<0xF9, "psubw", sub, v8i16, v16i16,
4048 SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
4049 defm PSUBD : PDI_binop_all<0xFA, "psubd", sub, v4i32, v8i32,
4050 SSE_INTALU_ITINS_P, 0, NoVLX>;
4051 defm PSUBQ : PDI_binop_all<0xFB, "psubq", sub, v2i64, v4i64,
4052 SSE_INTALUQ_ITINS_P, 0, NoVLX>;
4053 defm PSUBUSB : PDI_binop_all<0xD8, "psubusb", X86subus, v16i8, v32i8,
4054 SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
4055 defm PSUBUSW : PDI_binop_all<0xD9, "psubusw", X86subus, v8i16, v16i16,
4056 SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
4057 defm PMINUB : PDI_binop_all<0xDA, "pminub", umin, v16i8, v32i8,
4058 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
4059 defm PMINSW : PDI_binop_all<0xEA, "pminsw", smin, v8i16, v16i16,
4060 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
4061 defm PMAXUB : PDI_binop_all<0xDE, "pmaxub", umax, v16i8, v32i8,
4062 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
4063 defm PMAXSW : PDI_binop_all<0xEE, "pmaxsw", smax, v8i16, v16i16,
4064 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
4066 // Intrinsic forms
4067 defm PSUBSB : PDI_binop_all_int<0xE8, "psubsb", int_x86_sse2_psubs_b,
4068 int_x86_avx2_psubs_b, SSE_INTALU_ITINS_P, 0>;
4069 defm PSUBSW : PDI_binop_all_int<0xE9, "psubsw" , int_x86_sse2_psubs_w,
4070 int_x86_avx2_psubs_w, SSE_INTALU_ITINS_P, 0>;
4071 defm PADDSB : PDI_binop_all_int<0xEC, "paddsb" , int_x86_sse2_padds_b,
4072 int_x86_avx2_padds_b, SSE_INTALU_ITINS_P, 1>;
4073 defm PADDSW : PDI_binop_all_int<0xED, "paddsw" , int_x86_sse2_padds_w,
4074 int_x86_avx2_padds_w, SSE_INTALU_ITINS_P, 1>;
4075 defm PADDUSB : PDI_binop_all_int<0xDC, "paddusb", int_x86_sse2_paddus_b,
4076 int_x86_avx2_paddus_b, SSE_INTALU_ITINS_P, 1>;
4077 defm PADDUSW : PDI_binop_all_int<0xDD, "paddusw", int_x86_sse2_paddus_w,
4078 int_x86_avx2_paddus_w, SSE_INTALU_ITINS_P, 1>;
4079 defm PMADDWD : PDI_binop_all_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd,
4080 int_x86_avx2_pmadd_wd, SSE_PMADD, 1>;
4081 defm PAVGB : PDI_binop_all_int<0xE0, "pavgb", int_x86_sse2_pavg_b,
4082 int_x86_avx2_pavg_b, SSE_INTALU_ITINS_P, 1>;
4083 defm PAVGW : PDI_binop_all_int<0xE3, "pavgw", int_x86_sse2_pavg_w,
4084 int_x86_avx2_pavg_w, SSE_INTALU_ITINS_P, 1>;
4085 defm PSADBW : PDI_binop_all_int<0xF6, "psadbw", int_x86_sse2_psad_bw,
4086 int_x86_avx2_psad_bw, SSE_PMADD, 1>;
4088 let Predicates = [HasAVX2] in
4089 def : Pat<(v32i8 (X86psadbw (v32i8 VR256:$src1),
4090 (v32i8 VR256:$src2))),
4091 (VPSADBWYrr VR256:$src2, VR256:$src1)>;
4093 let Predicates = [HasAVX] in
4094 def : Pat<(v16i8 (X86psadbw (v16i8 VR128:$src1),
4095 (v16i8 VR128:$src2))),
4096 (VPSADBWrr VR128:$src2, VR128:$src1)>;
4098 def : Pat<(v16i8 (X86psadbw (v16i8 VR128:$src1),
4099 (v16i8 VR128:$src2))),
4100 (PSADBWrr VR128:$src2, VR128:$src1)>;
4102 let Predicates = [HasAVX] in
4103 defm VPMULUDQ : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v2i64, v4i32, VR128,
4104 loadv2i64, i128mem, SSE_INTMUL_ITINS_P, 1, 0>,
4105 VEX_4V;
4106 let Predicates = [HasAVX2] in
4107 defm VPMULUDQY : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v4i64, v8i32,
4108 VR256, loadv4i64, i256mem,
4109 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
4110 let Constraints = "$src1 = $dst" in
4111 defm PMULUDQ : PDI_binop_rm2<0xF4, "pmuludq", X86pmuludq, v2i64, v4i32, VR128,
4112 memopv2i64, i128mem, SSE_INTMUL_ITINS_P, 1>;
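//
// Illustrative C sketch of a few of the packed-integer ops defined above (a
// hand-written example, not generated from this file; helper names are ours):
//
//   #include <emmintrin.h>
//   // paddw wraps on overflow, paddsw saturates to [-32768, 32767]:
//   __m128i add_wrap(__m128i a, __m128i b) { return _mm_add_epi16(a, b); }
//   __m128i add_sat(__m128i a, __m128i b)  { return _mm_adds_epi16(a, b); }
//   // pmuludq multiplies the even 32-bit lanes into two 64-bit products:
//   __m128i mul_even(__m128i a, __m128i b) { return _mm_mul_epu32(a, b); }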
4114 //===---------------------------------------------------------------------===//
4115 // SSE2 - Packed Integer Logical Instructions
4116 //===---------------------------------------------------------------------===//
4118 let Predicates = [HasAVX, NoVLX] in {
4119 defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
4120 VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
4121 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4122 defm VPSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
4123 VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
4124 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4125 defm VPSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
4126 VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
4127 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4129 defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
4130 VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
4131 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4132 defm VPSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
4133 VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
4134 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4135 defm VPSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
4136 VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
4137 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4139 defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
4140 VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
4141 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4142 defm VPSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
4143 VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
4144 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4145 } // Predicates = [HasAVX, NoVLX]
4147 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift],
4148 Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
4149 // 128-bit logical shifts.
4150 def VPSLLDQri : PDIi8<0x73, MRM7r,
4151 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
4152 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4153 [(set VR128:$dst,
4154 (v2i64 (X86vshldq VR128:$src1, (i8 imm:$src2))))]>,
4155 VEX_4V;
4156 def VPSRLDQri : PDIi8<0x73, MRM3r,
4157 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
4158 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4159 [(set VR128:$dst,
4160 (v2i64 (X86vshrdq VR128:$src1, (i8 imm:$src2))))]>,
4161 VEX_4V;
4162 // PSRADQri doesn't exist in SSE[1-3].
4163 } // Predicates = [HasAVX, NoVLX_Or_NoBWI]
4165 let Predicates = [HasAVX2, NoVLX] in {
4166 defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
4167 VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
4168 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4169 defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
4170 VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
4171 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4172 defm VPSLLQY : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
4173 VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
4174 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4176 defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
4177 VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
4178 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4179 defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
4180 VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
4181 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4182 defm VPSRLQY : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
4183 VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
4184 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4186 defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
4187 VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
4188 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4189 defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
4190 VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
4191 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4192 } // Predicates = [HasAVX2, NoVLX]
4194 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0,
4195 Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
4196 // 256-bit logical shifts.
4197 def VPSLLDQYri : PDIi8<0x73, MRM7r,
4198 (outs VR256:$dst), (ins VR256:$src1, u8imm:$src2),
4199 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4200 [(set VR256:$dst,
4201 (v4i64 (X86vshldq VR256:$src1, (i8 imm:$src2))))]>,
4202 VEX_4V, VEX_L;
4203 def VPSRLDQYri : PDIi8<0x73, MRM3r,
4204 (outs VR256:$dst), (ins VR256:$src1, u8imm:$src2),
4205 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4206 [(set VR256:$dst,
4207 (v4i64 (X86vshrdq VR256:$src1, (i8 imm:$src2))))]>,
4208 VEX_4V, VEX_L;
4209 // PSRADQYri doesn't exist in SSE[1-3].
4210 } // Predicates = [HasAVX2, NoVLX_Or_NoBWI]
4212 let Constraints = "$src1 = $dst" in {
4213 defm PSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "psllw", X86vshl, X86vshli,
4214 VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
4215 SSE_INTSHIFT_ITINS_P>;
4216 defm PSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "pslld", X86vshl, X86vshli,
4217 VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
4218 SSE_INTSHIFT_ITINS_P>;
4219 defm PSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "psllq", X86vshl, X86vshli,
4220 VR128, v2i64, v2i64, bc_v2i64, memopv2i64,
4221 SSE_INTSHIFT_ITINS_P>;
4223 defm PSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "psrlw", X86vsrl, X86vsrli,
4224 VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
4225 SSE_INTSHIFT_ITINS_P>;
4226 defm PSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "psrld", X86vsrl, X86vsrli,
4227 VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
4228 SSE_INTSHIFT_ITINS_P>;
4229 defm PSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "psrlq", X86vsrl, X86vsrli,
4230 VR128, v2i64, v2i64, bc_v2i64, memopv2i64,
4231 SSE_INTSHIFT_ITINS_P>;
4233 defm PSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "psraw", X86vsra, X86vsrai,
4234 VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
4235 SSE_INTSHIFT_ITINS_P>;
4236 defm PSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "psrad", X86vsra, X86vsrai,
4237 VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
4238 SSE_INTSHIFT_ITINS_P>;
4240 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 in {
4241 // 128-bit logical shifts.
4242 def PSLLDQri : PDIi8<0x73, MRM7r,
4243 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
4244 "pslldq\t{$src2, $dst|$dst, $src2}",
4246 (v2i64 (X86vshldq VR128:$src1, (i8 imm:$src2))))],
4247 IIC_SSE_INTSHDQ_P_RI>;
4248 def PSRLDQri : PDIi8<0x73, MRM3r,
4249 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
4250 "psrldq\t{$src2, $dst|$dst, $src2}",
4252 (v2i64 (X86vshrdq VR128:$src1, (i8 imm:$src2))))],
4253 IIC_SSE_INTSHDQ_P_RI>;
4254 // PSRADQri doesn't exist in SSE[1-3].
4255 }
4256 } // Constraints = "$src1 = $dst"
4258 //===---------------------------------------------------------------------===//
4259 // SSE2 - Packed Integer Comparison Instructions
4260 //===---------------------------------------------------------------------===//
4262 defm PCMPEQB : PDI_binop_all<0x74, "pcmpeqb", X86pcmpeq, v16i8, v32i8,
4263 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
4264 defm PCMPEQW : PDI_binop_all<0x75, "pcmpeqw", X86pcmpeq, v8i16, v16i16,
4265 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
4266 defm PCMPEQD : PDI_binop_all<0x76, "pcmpeqd", X86pcmpeq, v4i32, v8i32,
4267 SSE_INTALU_ITINS_P, 1, NoVLX>;
4268 defm PCMPGTB : PDI_binop_all<0x64, "pcmpgtb", X86pcmpgt, v16i8, v32i8,
4269 SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
4270 defm PCMPGTW : PDI_binop_all<0x65, "pcmpgtw", X86pcmpgt, v8i16, v16i16,
4271 SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
4272 defm PCMPGTD : PDI_binop_all<0x66, "pcmpgtd", X86pcmpgt, v4i32, v8i32,
4273 SSE_INTALU_ITINS_P, 0, NoVLX>;
4275 //===---------------------------------------------------------------------===//
4276 // SSE2 - Packed Integer Shuffle Instructions
4277 //===---------------------------------------------------------------------===//
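4278 // Each multiclass instance below emits the AVX, AVX2, and SSE2 variants of one pshuf* opcode.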
4279 let ExeDomain = SSEPackedInt in {
4280 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt128, ValueType vt256,
4281 SDNode OpNode> {
4282 let Predicates = [HasAVX] in {
4283 def V#NAME#ri : Ii8<0x70, MRMSrcReg, (outs VR128:$dst),
4284 (ins VR128:$src1, u8imm:$src2),
4285 !strconcat("v", OpcodeStr,
4286 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4288 (vt128 (OpNode VR128:$src1, (i8 imm:$src2))))],
4289 IIC_SSE_PSHUF_RI>, VEX, Sched<[WriteShuffle]>;
4290 def V#NAME#mi : Ii8<0x70, MRMSrcMem, (outs VR128:$dst),
4291 (ins i128mem:$src1, u8imm:$src2),
4292 !strconcat("v", OpcodeStr,
4293 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4295 (vt128 (OpNode (bitconvert (loadv2i64 addr:$src1)),
4296 (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>, VEX,
4297 Sched<[WriteShuffleLd]>;
4298 }
4300 let Predicates = [HasAVX2] in {
4301 def V#NAME#Yri : Ii8<0x70, MRMSrcReg, (outs VR256:$dst),
4302 (ins VR256:$src1, u8imm:$src2),
4303 !strconcat("v", OpcodeStr,
4304 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4306 (vt256 (OpNode VR256:$src1, (i8 imm:$src2))))],
4307 IIC_SSE_PSHUF_RI>, VEX, VEX_L, Sched<[WriteShuffle]>;
4308 def V#NAME#Ymi : Ii8<0x70, MRMSrcMem, (outs VR256:$dst),
4309 (ins i256mem:$src1, u8imm:$src2),
4310 !strconcat("v", OpcodeStr,
4311 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4313 (vt256 (OpNode (bitconvert (loadv4i64 addr:$src1)),
4314 (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>, VEX, VEX_L,
4315 Sched<[WriteShuffleLd]>;
4316 }
4318 let Predicates = [UseSSE2] in {
4319 def ri : Ii8<0x70, MRMSrcReg,
4320 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
4321 !strconcat(OpcodeStr,
4322 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4324 (vt128 (OpNode VR128:$src1, (i8 imm:$src2))))],
4325 IIC_SSE_PSHUF_RI>, Sched<[WriteShuffle]>;
4326 def mi : Ii8<0x70, MRMSrcMem,
4327 (outs VR128:$dst), (ins i128mem:$src1, u8imm:$src2),
4328 !strconcat(OpcodeStr,
4329 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4331 (vt128 (OpNode (bitconvert (memopv2i64 addr:$src1)),
4332 (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>,
4333 Sched<[WriteShuffleLd, ReadAfterLd]>;
4334 }
4335 }
4336 } // ExeDomain = SSEPackedInt
4338 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, v8i32, X86PShufd>, PD;
4339 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, v16i16, X86PShufhw>, XS;
4340 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, v16i16, X86PShuflw>, XD;
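4341 // v4f32 shuffles can reuse PSHUFD directly; only the element type differs.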
4342 let Predicates = [HasAVX] in {
4343 def : Pat<(v4f32 (X86PShufd (loadv4f32 addr:$src1), (i8 imm:$imm))),
4344 (VPSHUFDmi addr:$src1, imm:$imm)>;
4345 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
4346 (VPSHUFDri VR128:$src1, imm:$imm)>;
4347 }
4349 let Predicates = [UseSSE2] in {
4350 def : Pat<(v4f32 (X86PShufd (memopv4f32 addr:$src1), (i8 imm:$imm))),
4351 (PSHUFDmi addr:$src1, imm:$imm)>;
4352 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
4353 (PSHUFDri VR128:$src1, imm:$imm)>;
4354 }
4356 //===---------------------------------------------------------------------===//
4357 // Packed Integer Pack Instructions (SSE & AVX)
4358 //===---------------------------------------------------------------------===//
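4359 // PACK* instructions narrow two source vectors into one register, saturating each element (signed for PACKSS*, unsigned for PACKUS*).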
4360 let ExeDomain = SSEPackedInt in {
4361 multiclass sse2_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
4362 ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
4363 PatFrag ld_frag, bit Is2Addr = 1> {
4364 def rr : PDI<opc, MRMSrcReg,
4365 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
4366 !if(Is2Addr,
4367 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4368 !strconcat(OpcodeStr,
4369 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4371 (OutVT (OpNode (ArgVT VR128:$src1), VR128:$src2)))]>,
4372 Sched<[WriteShuffle]>;
4373 def rm : PDI<opc, MRMSrcMem,
4374 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
4375 !if(Is2Addr,
4376 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4377 !strconcat(OpcodeStr,
4378 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4380 (OutVT (OpNode VR128:$src1,
4381 (bc_frag (ld_frag addr:$src2)))))]>,
4382 Sched<[WriteShuffleLd, ReadAfterLd]>;
4383 }
4385 multiclass sse2_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
4386 ValueType ArgVT, SDNode OpNode, PatFrag bc_frag> {
4387 def Yrr : PDI<opc, MRMSrcReg,
4388 (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
4389 !strconcat(OpcodeStr,
4390 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4392 (OutVT (OpNode (ArgVT VR256:$src1), VR256:$src2)))]>,
4393 Sched<[WriteShuffle]>;
4394 def Yrm : PDI<opc, MRMSrcMem,
4395 (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
4396 !strconcat(OpcodeStr,
4397 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4399 (OutVT (OpNode VR256:$src1,
4400 (bc_frag (loadv4i64 addr:$src2)))))]>,
4401 Sched<[WriteShuffleLd, ReadAfterLd]>;
4402 }
4404 multiclass sse4_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
4405 ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
4406 PatFrag ld_frag, bit Is2Addr = 1> {
4407 def rr : SS48I<opc, MRMSrcReg,
4408 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
4409 !if(Is2Addr,
4410 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4411 !strconcat(OpcodeStr,
4412 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4414 (OutVT (OpNode (ArgVT VR128:$src1), VR128:$src2)))]>,
4415 Sched<[WriteShuffle]>;
4416 def rm : SS48I<opc, MRMSrcMem,
4417 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
4418 !if(Is2Addr,
4419 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4420 !strconcat(OpcodeStr,
4421 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4423 (OutVT (OpNode VR128:$src1,
4424 (bc_frag (ld_frag addr:$src2)))))]>,
4425 Sched<[WriteShuffleLd, ReadAfterLd]>;
4426 }
4428 multiclass sse4_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
4429 ValueType ArgVT, SDNode OpNode, PatFrag bc_frag> {
4430 def Yrr : SS48I<opc, MRMSrcReg,
4431 (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
4432 !strconcat(OpcodeStr,
4433 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4435 (OutVT (OpNode (ArgVT VR256:$src1), VR256:$src2)))]>,
4436 Sched<[WriteShuffle]>;
4437 def Yrm : SS48I<opc, MRMSrcMem,
4438 (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
4439 !strconcat(OpcodeStr,
4440 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4442 (OutVT (OpNode VR256:$src1,
4443 (bc_frag (loadv4i64 addr:$src2)))))]>,
4444 Sched<[WriteShuffleLd, ReadAfterLd]>;
4445 }
4447 let Predicates = [HasAVX] in {
4448 defm VPACKSSWB : sse2_pack<0x63, "vpacksswb", v16i8, v8i16, X86Packss,
4449 bc_v8i16, loadv2i64, 0>, VEX_4V;
4450 defm VPACKSSDW : sse2_pack<0x6B, "vpackssdw", v8i16, v4i32, X86Packss,
4451 bc_v4i32, loadv2i64, 0>, VEX_4V;
4453 defm VPACKUSWB : sse2_pack<0x67, "vpackuswb", v16i8, v8i16, X86Packus,
4454 bc_v8i16, loadv2i64, 0>, VEX_4V;
4455 defm VPACKUSDW : sse4_pack<0x2B, "vpackusdw", v8i16, v4i32, X86Packus,
4456 bc_v4i32, loadv2i64, 0>, VEX_4V;
4457 }
4459 let Predicates = [HasAVX2] in {
4460 defm VPACKSSWB : sse2_pack_y<0x63, "vpacksswb", v32i8, v16i16, X86Packss,
4461 bc_v16i16>, VEX_4V, VEX_L;
4462 defm VPACKSSDW : sse2_pack_y<0x6B, "vpackssdw", v16i16, v8i32, X86Packss,
4463 bc_v8i32>, VEX_4V, VEX_L;
4465 defm VPACKUSWB : sse2_pack_y<0x67, "vpackuswb", v32i8, v16i16, X86Packus,
4466 bc_v16i16>, VEX_4V, VEX_L;
4467 defm VPACKUSDW : sse4_pack_y<0x2B, "vpackusdw", v16i16, v8i32, X86Packus,
4468 bc_v8i32>, VEX_4V, VEX_L;
4469 }
4471 let Constraints = "$src1 = $dst" in {
4472 defm PACKSSWB : sse2_pack<0x63, "packsswb", v16i8, v8i16, X86Packss,
4473 bc_v8i16, memopv2i64>;
4474 defm PACKSSDW : sse2_pack<0x6B, "packssdw", v8i16, v4i32, X86Packss,
4475 bc_v4i32, memopv2i64>;
4477 defm PACKUSWB : sse2_pack<0x67, "packuswb", v16i8, v8i16, X86Packus,
4478 bc_v8i16, memopv2i64>;
4480 let Predicates = [HasSSE41] in
4481 defm PACKUSDW : sse4_pack<0x2B, "packusdw", v8i16, v4i32, X86Packus,
4482 bc_v4i32, memopv2i64>;
4483 } // Constraints = "$src1 = $dst"
4484 } // ExeDomain = SSEPackedInt
4486 //===---------------------------------------------------------------------===//
4487 // SSE2 - Packed Integer Unpack Instructions
4488 //===---------------------------------------------------------------------===//
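4489 // PUNPCKL*/PUNPCKH* interleave elements from the low/high halves of the two sources.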
4490 let ExeDomain = SSEPackedInt in {
4491 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
4492 SDNode OpNode, PatFrag bc_frag, PatFrag ld_frag,
4493 bit Is2Addr = 1> {
4494 def rr : PDI<opc, MRMSrcReg,
4495 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
4496 !if(Is2Addr,
4497 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
4498 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4499 [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))],
4500 IIC_SSE_UNPCK>, Sched<[WriteShuffle]>;
4501 def rm : PDI<opc, MRMSrcMem,
4502 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
4503 !if(Is2Addr,
4504 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
4505 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4506 [(set VR128:$dst, (OpNode VR128:$src1,
4507 (bc_frag (ld_frag addr:$src2))))],
4508 IIC_SSE_UNPCK>,
4509 Sched<[WriteShuffleLd, ReadAfterLd]>;
4510 }
4512 multiclass sse2_unpack_y<bits<8> opc, string OpcodeStr, ValueType vt,
4513 SDNode OpNode, PatFrag bc_frag> {
4514 def Yrr : PDI<opc, MRMSrcReg,
4515 (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
4516 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4517 [(set VR256:$dst, (vt (OpNode VR256:$src1, VR256:$src2)))]>,
4518 Sched<[WriteShuffle]>;
4519 def Yrm : PDI<opc, MRMSrcMem,
4520 (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
4521 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4522 [(set VR256:$dst, (OpNode VR256:$src1,
4523 (bc_frag (loadv4i64 addr:$src2))))]>,
4524 Sched<[WriteShuffleLd, ReadAfterLd]>;
4525 }
4528 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
4529 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Unpckl,
4530 bc_v16i8, loadv2i64, 0>, VEX_4V;
4531 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Unpckl,
4532 bc_v8i16, loadv2i64, 0>, VEX_4V;
4533 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Unpckh,
4534 bc_v16i8, loadv2i64, 0>, VEX_4V;
4535 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Unpckh,
4536 bc_v8i16, loadv2i64, 0>, VEX_4V;
4537 }
4538 let Predicates = [HasAVX, NoVLX] in {
4539 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Unpckl,
4540 bc_v4i32, loadv2i64, 0>, VEX_4V;
4541 defm VPUNPCKLQDQ : sse2_unpack<0x6C, "vpunpcklqdq", v2i64, X86Unpckl,
4542 bc_v2i64, loadv2i64, 0>, VEX_4V;
4543 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Unpckh,
4544 bc_v4i32, loadv2i64, 0>, VEX_4V;
4545 defm VPUNPCKHQDQ : sse2_unpack<0x6D, "vpunpckhqdq", v2i64, X86Unpckh,
4546 bc_v2i64, loadv2i64, 0>, VEX_4V;
4547 }
4549 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
4550 defm VPUNPCKLBW : sse2_unpack_y<0x60, "vpunpcklbw", v32i8, X86Unpckl,
4551 bc_v32i8>, VEX_4V, VEX_L;
4552 defm VPUNPCKLWD : sse2_unpack_y<0x61, "vpunpcklwd", v16i16, X86Unpckl,
4553 bc_v16i16>, VEX_4V, VEX_L;
4554 defm VPUNPCKHBW : sse2_unpack_y<0x68, "vpunpckhbw", v32i8, X86Unpckh,
4555 bc_v32i8>, VEX_4V, VEX_L;
4556 defm VPUNPCKHWD : sse2_unpack_y<0x69, "vpunpckhwd", v16i16, X86Unpckh,
4557 bc_v16i16>, VEX_4V, VEX_L;
4558 }
4559 let Predicates = [HasAVX2, NoVLX] in {
4560 defm VPUNPCKLDQ : sse2_unpack_y<0x62, "vpunpckldq", v8i32, X86Unpckl,
4561 bc_v8i32>, VEX_4V, VEX_L;
4562 defm VPUNPCKLQDQ : sse2_unpack_y<0x6C, "vpunpcklqdq", v4i64, X86Unpckl,
4563 bc_v4i64>, VEX_4V, VEX_L;
4564 defm VPUNPCKHDQ : sse2_unpack_y<0x6A, "vpunpckhdq", v8i32, X86Unpckh,
4565 bc_v8i32>, VEX_4V, VEX_L;
4566 defm VPUNPCKHQDQ : sse2_unpack_y<0x6D, "vpunpckhqdq", v4i64, X86Unpckh,
4567 bc_v4i64>, VEX_4V, VEX_L;
4568 }
4570 let Constraints = "$src1 = $dst" in {
4571 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Unpckl,
4572 bc_v16i8, memopv2i64>;
4573 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Unpckl,
4574 bc_v8i16, memopv2i64>;
4575 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Unpckl,
4576 bc_v4i32, memopv2i64>;
4577 defm PUNPCKLQDQ : sse2_unpack<0x6C, "punpcklqdq", v2i64, X86Unpckl,
4578 bc_v2i64, memopv2i64>;
4580 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Unpckh,
4581 bc_v16i8, memopv2i64>;
4582 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Unpckh,
4583 bc_v8i16, memopv2i64>;
4584 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Unpckh,
4585 bc_v4i32, memopv2i64>;
4586 defm PUNPCKHQDQ : sse2_unpack<0x6D, "punpckhqdq", v2i64, X86Unpckh,
4587 bc_v2i64, memopv2i64>;
4588 } // Constraints = "$src1 = $dst"
4589 } // ExeDomain = SSEPackedInt
4591 //===---------------------------------------------------------------------===//
4592 // SSE2 - Packed Integer Extract and Insert
4593 //===---------------------------------------------------------------------===//
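4594 // PINSRW/PEXTRW move one 16-bit element, selected by the immediate, between a GPR and an XMM register.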
4595 let ExeDomain = SSEPackedInt in {
4596 multiclass sse2_pinsrw<bit Is2Addr = 1> {
4597 def rri : Ii8<0xC4, MRMSrcReg,
4598 (outs VR128:$dst), (ins VR128:$src1,
4599 GR32orGR64:$src2, u8imm:$src3),
4600 !if(Is2Addr,
4601 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
4602 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4604 (X86pinsrw VR128:$src1, GR32orGR64:$src2, imm:$src3))],
4605 IIC_SSE_PINSRW>, Sched<[WriteShuffle]>;
4606 def rmi : Ii8<0xC4, MRMSrcMem,
4607 (outs VR128:$dst), (ins VR128:$src1,
4608 i16mem:$src2, u8imm:$src3),
4609 !if(Is2Addr,
4610 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
4611 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4613 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
4614 imm:$src3))], IIC_SSE_PINSRW>,
4615 Sched<[WriteShuffleLd, ReadAfterLd]>;
4616 }
4619 let Predicates = [HasAVX, NoBWI] in
4620 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
4621 (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
4622 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4623 [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
4624 imm:$src2))]>, PD, VEX,
4625 Sched<[WriteShuffle]>;
4626 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
4627 (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
4628 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4629 [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
4630 imm:$src2))], IIC_SSE_PEXTRW>,
4631 Sched<[WriteShuffleLd, ReadAfterLd]>;
4634 let Predicates = [HasAVX, NoBWI] in
4635 defm VPINSRW : sse2_pinsrw<0>, PD, VEX_4V;
4637 let Predicates = [UseSSE2], Constraints = "$src1 = $dst" in
4638 defm PINSRW : sse2_pinsrw, PD;
4640 } // ExeDomain = SSEPackedInt
4642 //===---------------------------------------------------------------------===//
4643 // SSE2 - Packed Mask Creation
4644 //===---------------------------------------------------------------------===//
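4645 // PMOVMSKB gathers the sign bit of each byte into a GPR bitmask.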
4646 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLogic] in {
4648 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
4649 (ins VR128:$src),
4650 "pmovmskb\t{$src, $dst|$dst, $src}",
4651 [(set GR32orGR64:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
4652 IIC_SSE_MOVMSK>, VEX;
4654 let Predicates = [HasAVX2] in {
4655 def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
4656 (ins VR256:$src),
4657 "pmovmskb\t{$src, $dst|$dst, $src}",
4658 [(set GR32orGR64:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>,
4659 VEX, VEX_L;
4660 }
4662 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst), (ins VR128:$src),
4663 "pmovmskb\t{$src, $dst|$dst, $src}",
4664 [(set GR32orGR64:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
4665 IIC_SSE_MOVMSK>;
4667 } // ExeDomain = SSEPackedInt
4669 //===---------------------------------------------------------------------===//
4670 // SSE2 - Conditional Store
4671 //===---------------------------------------------------------------------===//
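4672 // MASKMOVDQU stores only the bytes whose mask byte has its MSB set, to [EDI]/[RDI].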
4673 let ExeDomain = SSEPackedInt, SchedRW = [WriteStore] in {
4675 let Uses = [EDI], Predicates = [HasAVX,Not64BitMode] in
4676 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
4677 (ins VR128:$src, VR128:$mask),
4678 "maskmovdqu\t{$mask, $src|$src, $mask}",
4679 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
4680 IIC_SSE_MASKMOV>, VEX;
4681 let Uses = [RDI], Predicates = [HasAVX,In64BitMode] in
4682 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
4683 (ins VR128:$src, VR128:$mask),
4684 "maskmovdqu\t{$mask, $src|$src, $mask}",
4685 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
4686 IIC_SSE_MASKMOV>, VEX;
4688 let Uses = [EDI], Predicates = [UseSSE2,Not64BitMode] in
4689 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
4690 "maskmovdqu\t{$mask, $src|$src, $mask}",
4691 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
4692 IIC_SSE_MASKMOV>;
4693 let Uses = [RDI], Predicates = [UseSSE2,In64BitMode] in
4694 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
4695 "maskmovdqu\t{$mask, $src|$src, $mask}",
4696 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
4697 IIC_SSE_MASKMOV>;
4699 } // ExeDomain = SSEPackedInt
4701 //===---------------------------------------------------------------------===//
4702 // SSE2 - Move Doubleword
4703 //===---------------------------------------------------------------------===//
4705 //===---------------------------------------------------------------------===//
4706 // Move Int Doubleword to Packed Double Int
4708 def VMOVDI2PDIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4709 "movd\t{$src, $dst|$dst, $src}",
4711 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
4712 VEX, Sched<[WriteMove]>;
4713 def VMOVDI2PDIrm : VS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4714 "movd\t{$src, $dst|$dst, $src}",
4716 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
4717 IIC_SSE_MOVDQ>,
4718 VEX, Sched<[WriteLoad]>;
4719 def VMOV64toPQIrr : VRS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4720 "movq\t{$src, $dst|$dst, $src}",
4722 (v2i64 (scalar_to_vector GR64:$src)))],
4723 IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
4724 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
4725 def VMOV64toPQIrm : VRS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4726 "movq\t{$src, $dst|$dst, $src}",
4727 [], IIC_SSE_MOVDQ>, VEX, Sched<[WriteLoad]>;
4728 let isCodeGenOnly = 1 in
4729 def VMOV64toSDrr : VRS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
4730 "movq\t{$src, $dst|$dst, $src}",
4731 [(set FR64:$dst, (bitconvert GR64:$src))],
4732 IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
4734 def MOVDI2PDIrr : S2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4735 "movd\t{$src, $dst|$dst, $src}",
4737 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
4738 Sched<[WriteMove]>;
4739 def MOVDI2PDIrm : S2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4740 "movd\t{$src, $dst|$dst, $src}",
4742 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
4743 IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
4744 def MOV64toPQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4745 "mov{d|q}\t{$src, $dst|$dst, $src}",
4747 (v2i64 (scalar_to_vector GR64:$src)))],
4748 IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
4749 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
4750 def MOV64toPQIrm : RS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4751 "mov{d|q}\t{$src, $dst|$dst, $src}",
4752 [], IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
4753 let isCodeGenOnly = 1 in
4754 def MOV64toSDrr : RS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
4755 "mov{d|q}\t{$src, $dst|$dst, $src}",
4756 [(set FR64:$dst, (bitconvert GR64:$src))],
4757 IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
4759 //===---------------------------------------------------------------------===//
4760 // Move Int Doubleword to Single Scalar
4762 let isCodeGenOnly = 1 in {
4763 def VMOVDI2SSrr : VS2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
4764 "movd\t{$src, $dst|$dst, $src}",
4765 [(set FR32:$dst, (bitconvert GR32:$src))],
4766 IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
4768 def VMOVDI2SSrm : VS2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
4769 "movd\t{$src, $dst|$dst, $src}",
4770 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
4771 IIC_SSE_MOVDQ>,
4772 VEX, Sched<[WriteLoad]>;
4773 def MOVDI2SSrr : S2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
4774 "movd\t{$src, $dst|$dst, $src}",
4775 [(set FR32:$dst, (bitconvert GR32:$src))],
4776 IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
4778 def MOVDI2SSrm : S2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
4779 "movd\t{$src, $dst|$dst, $src}",
4780 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
4781 IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
4782 }
4784 //===---------------------------------------------------------------------===//
4785 // Move Packed Doubleword Int to Packed Double Int
4787 def VMOVPDI2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
4788 "movd\t{$src, $dst|$dst, $src}",
4789 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
4790 (iPTR 0)))], IIC_SSE_MOVD_ToGP>, VEX,
4791 Sched<[WriteMove]>;
4792 def VMOVPDI2DImr : VS2I<0x7E, MRMDestMem, (outs),
4793 (ins i32mem:$dst, VR128:$src),
4794 "movd\t{$src, $dst|$dst, $src}",
4795 [(store (i32 (vector_extract (v4i32 VR128:$src),
4796 (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
4797 VEX, Sched<[WriteStore]>;
4798 def MOVPDI2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
4799 "movd\t{$src, $dst|$dst, $src}",
4800 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
4801 (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
4802 Sched<[WriteMove]>;
4803 def MOVPDI2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
4804 "movd\t{$src, $dst|$dst, $src}",
4805 [(store (i32 (vector_extract (v4i32 VR128:$src),
4806 (iPTR 0))), addr:$dst)],
4807 IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
4809 def : Pat<(v8i32 (X86Vinsert (v8i32 immAllZerosV), GR32:$src2, (iPTR 0))),
4810 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src2), sub_xmm)>;
4812 def : Pat<(v4i64 (X86Vinsert (bc_v4i64 (v8i32 immAllZerosV)), GR64:$src2, (iPTR 0))),
4813 (SUBREG_TO_REG (i32 0), (VMOV64toPQIrr GR64:$src2), sub_xmm)>;
4815 def : Pat<(v8i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
4816 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src2), sub_xmm)>;
4818 def : Pat<(v4i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
4819 (SUBREG_TO_REG (i32 0), (VMOV64toPQIrr GR64:$src2), sub_xmm)>;
4821 //===---------------------------------------------------------------------===//
4822 // Move Packed Doubleword Int first element to Doubleword Int
4824 let SchedRW = [WriteMove] in {
4825 def VMOVPQIto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4826 "movq\t{$src, $dst|$dst, $src}",
4827 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
4828 (iPTR 0)))],
4829 IIC_SSE_MOVD_ToGP>,
4830 VEX;
4832 def MOVPQIto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4833 "mov{d|q}\t{$src, $dst|$dst, $src}",
4834 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
4835 (iPTR 0)))],
4836 IIC_SSE_MOVD_ToGP>;
4837 } // SchedRW
4839 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
4840 def VMOVPQIto64rm : VRS2I<0x7E, MRMDestMem, (outs i64mem:$dst),
4841 (ins VR128:$src), "movq\t{$src, $dst|$dst, $src}",
4842 [], IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
4843 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
4844 def MOVPQIto64rm : RS2I<0x7E, MRMDestMem, (outs i64mem:$dst), (ins VR128:$src),
4845 "mov{d|q}\t{$src, $dst|$dst, $src}",
4846 [], IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
4848 //===---------------------------------------------------------------------===//
4849 // Bitcast FR64 <-> GR64
4851 let isCodeGenOnly = 1 in {
4852 let Predicates = [UseAVX] in
4853 def VMOV64toSDrm : VS2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
4854 "movq\t{$src, $dst|$dst, $src}",
4855 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
4856 VEX, Sched<[WriteLoad]>;
4857 def VMOVSDto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
4858 "movq\t{$src, $dst|$dst, $src}",
4859 [(set GR64:$dst, (bitconvert FR64:$src))],
4860 IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
4861 def VMOVSDto64mr : VRS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
4862 "movq\t{$src, $dst|$dst, $src}",
4863 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
4864 IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
4866 def MOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
4867 "movq\t{$src, $dst|$dst, $src}",
4868 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))],
4869 IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
4870 def MOVSDto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
4871 "mov{d|q}\t{$src, $dst|$dst, $src}",
4872 [(set GR64:$dst, (bitconvert FR64:$src))],
4873 IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
4874 def MOVSDto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
4875 "movq\t{$src, $dst|$dst, $src}",
4876 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
4877 IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
4878 }
4880 //===---------------------------------------------------------------------===//
4881 // Move Scalar Single to Double Int
4883 let isCodeGenOnly = 1 in {
4884 def VMOVSS2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
4885 "movd\t{$src, $dst|$dst, $src}",
4886 [(set GR32:$dst, (bitconvert FR32:$src))],
4887 IIC_SSE_MOVD_ToGP>, VEX, Sched<[WriteMove]>;
4888 def VMOVSS2DImr : VS2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
4889 "movd\t{$src, $dst|$dst, $src}",
4890 [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
4891 IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
4892 def MOVSS2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
4893 "movd\t{$src, $dst|$dst, $src}",
4894 [(set GR32:$dst, (bitconvert FR32:$src))],
4895 IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
4896 def MOVSS2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
4897 "movd\t{$src, $dst|$dst, $src}",
4898 [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
4899 IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
4900 }
4902 //===---------------------------------------------------------------------===//
4903 // Patterns and instructions for movd/movq to an XMM register, zero-extending the result
4905 let isCodeGenOnly = 1, SchedRW = [WriteMove] in {
4906 let AddedComplexity = 15 in {
4907 def VMOVZQI2PQIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4908 "movq\t{$src, $dst|$dst, $src}", // X86-64 only
4909 [(set VR128:$dst, (v2i64 (X86vzmovl
4910 (v2i64 (scalar_to_vector GR64:$src)))))],
4911 IIC_SSE_MOVDQ>,
4912 VEX, VEX_W;
4913 def MOVZQI2PQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4914 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
4915 [(set VR128:$dst, (v2i64 (X86vzmovl
4916 (v2i64 (scalar_to_vector GR64:$src)))))],
4917 IIC_SSE_MOVDQ>;
4918 }
4919 } // isCodeGenOnly, SchedRW
4921 let Predicates = [UseAVX] in {
4922 let AddedComplexity = 15 in
4923 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
4924 (VMOVDI2PDIrr GR32:$src)>;
4926 // AVX 128-bit movd/movq instructions zero the remaining bits of the 128-bit
4927 // register, and also zero the high 128 bits of the containing 256-bit register.
4928 let AddedComplexity = 20 in {
4929 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
4930 (VMOVDI2PDIrm addr:$src)>;
4931 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
4932 (VMOVDI2PDIrm addr:$src)>;
4933 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
4934 (VMOVDI2PDIrm addr:$src)>;
4935 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
4936 (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
4937 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrm addr:$src), sub_xmm)>;
4939 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
4940 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
4941 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
4942 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src), sub_xmm)>;
4943 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
4944 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
4945 (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
4946 }
4947 }
4948 let Predicates = [UseSSE2] in {
4949 let AddedComplexity = 15 in
4950 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
4951 (MOVDI2PDIrr GR32:$src)>;
4953 let AddedComplexity = 20 in {
4954 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
4955 (MOVDI2PDIrm addr:$src)>;
4956 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
4957 (MOVDI2PDIrm addr:$src)>;
4958 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
4959 (MOVDI2PDIrm addr:$src)>;
4960 }
4961 }
4963 // These are the correct encodings of the instructions, so that we can read
4964 // correctly written assembly, even though we continue to emit the wrong ones
4965 // for compatibility with Darwin's buggy assembler.
4966 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4967 (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
4968 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4969 (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
4970 // Allow "vmovd" but print "vmovq" since we don't need compatibility for AVX.
4971 def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
4972 (VMOV64toPQIrr VR128:$dst, GR64:$src), 0>;
4973 def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
4974 (VMOVPQIto64rr GR64:$dst, VR128:$src), 0>;
4976 //===---------------------------------------------------------------------===//
4977 // SSE2 - Move Quadword
4978 //===---------------------------------------------------------------------===//
4980 //===---------------------------------------------------------------------===//
4981 // Move Quadword Int to Packed Quadword Int
4984 let ExeDomain = SSEPackedInt, SchedRW = [WriteLoad] in {
4985 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4986 "vmovq\t{$src, $dst|$dst, $src}",
4988 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
4989 VEX, Requires<[UseAVX]>;
4990 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4991 "movq\t{$src, $dst|$dst, $src}",
4993 (v2i64 (scalar_to_vector (loadi64 addr:$src))))],
4994 IIC_SSE_MOVDQ>, XS,
4995 Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix
4996 } // ExeDomain, SchedRW
4998 //===---------------------------------------------------------------------===//
4999 // Move Packed Quadword Int to Quadword Int
5001 let ExeDomain = SSEPackedInt, SchedRW = [WriteStore] in {
5002 def VMOVPQI2QImr : VS2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
5003 "movq\t{$src, $dst|$dst, $src}",
5004 [(store (i64 (vector_extract (v2i64 VR128:$src),
5005 (iPTR 0))), addr:$dst)],
5006 IIC_SSE_MOVDQ>, VEX;
5007 def MOVPQI2QImr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
5008 "movq\t{$src, $dst|$dst, $src}",
5009 [(store (i64 (vector_extract (v2i64 VR128:$src),
5010 (iPTR 0))), addr:$dst)],
5011 IIC_SSE_MOVDQ>;
5012 } // ExeDomain, SchedRW
5014 // For disassembler only
5015 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
5016 SchedRW = [WriteVecLogic] in {
5017 def VMOVPQI2QIrr : VS2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
5018 "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>, VEX;
5019 def MOVPQI2QIrr : S2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
5020 "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>;
5023 //===---------------------------------------------------------------------===//
5024 // Store / copy the lower 64 bits of an XMM register.
5026 let Predicates = [HasAVX] in
5027 def : Pat<(int_x86_sse2_storel_dq addr:$dst, VR128:$src),
5028 (VMOVPQI2QImr addr:$dst, VR128:$src)>;
5029 let Predicates = [UseSSE2] in
5030 def : Pat<(int_x86_sse2_storel_dq addr:$dst, VR128:$src),
5031 (MOVPQI2QImr addr:$dst, VR128:$src)>;
5033 let ExeDomain = SSEPackedInt, isCodeGenOnly = 1, AddedComplexity = 20 in {
5034 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
5035 "vmovq\t{$src, $dst|$dst, $src}",
5037 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
5038 (loadi64 addr:$src))))))],
5039 IIC_SSE_MOVDQ>,
5040 XS, VEX, Requires<[UseAVX]>, Sched<[WriteLoad]>;
5042 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
5043 "movq\t{$src, $dst|$dst, $src}",
5045 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
5046 (loadi64 addr:$src))))))],
5047 IIC_SSE_MOVDQ>,
5048 XS, Requires<[UseSSE2]>, Sched<[WriteLoad]>;
5049 } // ExeDomain, isCodeGenOnly, AddedComplexity
5051 let Predicates = [UseAVX], AddedComplexity = 20 in {
5052 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
5053 (VMOVZQI2PQIrm addr:$src)>;
5054 def : Pat<(v2i64 (X86vzload addr:$src)),
5055 (VMOVZQI2PQIrm addr:$src)>;
5056 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
5057 (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
5058 (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrm addr:$src), sub_xmm)>;
5059 }
5061 let Predicates = [UseSSE2], AddedComplexity = 20 in {
5062 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
5063 (MOVZQI2PQIrm addr:$src)>;
5064 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
5065 }
5067 let Predicates = [HasAVX] in {
5068 def : Pat<(v4i64 (alignedX86vzload addr:$src)),
5069 (SUBREG_TO_REG (i32 0), (VMOVAPSrm addr:$src), sub_xmm)>;
5070 def : Pat<(v4i64 (X86vzload addr:$src)),
5071 (SUBREG_TO_REG (i32 0), (VMOVUPSrm addr:$src), sub_xmm)>;
5072 }
5074 //===---------------------------------------------------------------------===//
5075 // Moving from XMM to XMM and clearing the upper 64 bits. Note: there is a bug
5076 // in the IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
5078 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLogic] in {
5079 let AddedComplexity = 15 in
5080 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5081 "vmovq\t{$src, $dst|$dst, $src}",
5082 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
5083 IIC_SSE_MOVQ_RR>,
5084 XS, VEX, Requires<[UseAVX]>;
5085 let AddedComplexity = 15 in
5086 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5087 "movq\t{$src, $dst|$dst, $src}",
5088 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
5089 IIC_SSE_MOVQ_RR>,
5090 XS, Requires<[UseSSE2]>;
5091 } // ExeDomain, SchedRW
5093 let ExeDomain = SSEPackedInt, isCodeGenOnly = 1, SchedRW = [WriteVecLogicLd] in {
5094 let AddedComplexity = 20 in
5095 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5096 "vmovq\t{$src, $dst|$dst, $src}",
5097 [(set VR128:$dst, (v2i64 (X86vzmovl
5098 (loadv2i64 addr:$src))))],
5099 IIC_SSE_MOVDQ>,
5100 XS, VEX, Requires<[UseAVX]>;
5101 let AddedComplexity = 20 in {
5102 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5103 "movq\t{$src, $dst|$dst, $src}",
5104 [(set VR128:$dst, (v2i64 (X86vzmovl
5105 (loadv2i64 addr:$src))))],
5106 IIC_SSE_MOVDQ>,
5107 XS, Requires<[UseSSE2]>;
5108 }
5109 } // ExeDomain, isCodeGenOnly, SchedRW
5111 let AddedComplexity = 20 in {
5112 let Predicates = [UseAVX] in {
5113 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
5114 (VMOVZPQILo2PQIrr VR128:$src)>;
5115 }
5116 let Predicates = [UseSSE2] in {
5117 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
5118 (MOVZPQILo2PQIrr VR128:$src)>;
5119 }
5120 }
5122 //===---------------------------------------------------------------------===//
5123 // SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
5124 //===---------------------------------------------------------------------===//
5125 multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
5126 ValueType vt, RegisterClass RC, PatFrag mem_frag,
5127 X86MemOperand x86memop> {
5128 def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
5129 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5130 [(set RC:$dst, (vt (OpNode RC:$src)))],
5131 IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
5132 def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
5133 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5134 [(set RC:$dst, (OpNode (mem_frag addr:$src)))],
5135 IIC_SSE_MOV_LH>, Sched<[WriteLoad]>;
5136 }
5138 let Predicates = [HasAVX, NoVLX] in {
5139 defm VMOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
5140 v4f32, VR128, loadv4f32, f128mem>, VEX;
5141 defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
5142 v4f32, VR128, loadv4f32, f128mem>, VEX;
5143 defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
5144 v8f32, VR256, loadv8f32, f256mem>, VEX, VEX_L;
5145 defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
5146 v8f32, VR256, loadv8f32, f256mem>, VEX, VEX_L;
5147 }
5148 defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
5149 memopv4f32, f128mem>;
5150 defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
5151 memopv4f32, f128mem>;
5153 let Predicates = [HasAVX, NoVLX] in {
5154 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
5155 (VMOVSHDUPrr VR128:$src)>;
5156 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (loadv2i64 addr:$src)))),
5157 (VMOVSHDUPrm addr:$src)>;
5158 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
5159 (VMOVSLDUPrr VR128:$src)>;
5160 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (loadv2i64 addr:$src)))),
5161 (VMOVSLDUPrm addr:$src)>;
5162 def : Pat<(v8i32 (X86Movshdup VR256:$src)),
5163 (VMOVSHDUPYrr VR256:$src)>;
5164 def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (loadv4i64 addr:$src)))),
5165 (VMOVSHDUPYrm addr:$src)>;
5166 def : Pat<(v8i32 (X86Movsldup VR256:$src)),
5167 (VMOVSLDUPYrr VR256:$src)>;
5168 def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (loadv4i64 addr:$src)))),
5169 (VMOVSLDUPYrm addr:$src)>;
5170 }
5172 let Predicates = [UseSSE3] in {
5173 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
5174 (MOVSHDUPrr VR128:$src)>;
5175 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
5176 (MOVSHDUPrm addr:$src)>;
5177 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
5178 (MOVSLDUPrr VR128:$src)>;
5179 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
5180 (MOVSLDUPrm addr:$src)>;
5181 }
5183 //===---------------------------------------------------------------------===//
5184 // SSE3 - Replicate Double FP - MOVDDUP
5185 //===---------------------------------------------------------------------===//
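5186 // MOVDDUP duplicates the low 64-bit element into both halves of the destination.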
5187 multiclass sse3_replicate_dfp<string OpcodeStr> {
5188 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5189 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5190 [(set VR128:$dst, (v2f64 (X86Movddup VR128:$src)))],
5191 IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
5192 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
5193 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5194 [(set VR128:$dst,
5195 (v2f64 (X86Movddup
5196 (scalar_to_vector (loadf64 addr:$src)))))],
5197 IIC_SSE_MOV_LH>, Sched<[WriteLoad]>;
5198 }
5200 // FIXME: Merge with the above class when there are patterns for the ymm version
5201 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
5202 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
5203 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5204 [(set VR256:$dst, (v4f64 (X86Movddup VR256:$src)))]>,
5205 Sched<[WriteFShuffle]>;
5206 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
5207 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5208 [(set VR256:$dst,
5209 (v4f64 (X86Movddup (loadv4f64 addr:$src))))]>,
5210 Sched<[WriteLoad]>;
5211 }
5213 let Predicates = [HasAVX, NoVLX] in {
5214 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
5215 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX, VEX_L;
5216 }
5218 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
5221 let Predicates = [HasAVX, NoVLX] in {
5222 def : Pat<(X86Movddup (loadv2f64 addr:$src)),
5223 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5226 def : Pat<(X86Movddup (loadv4i64 addr:$src)),
5227 (VMOVDDUPYrm addr:$src)>;
5228 def : Pat<(X86Movddup (v4i64 VR256:$src)),
5229 (VMOVDDUPYrr VR256:$src)>;
5230 }
5232 let Predicates = [HasAVX] in {
5233 def : Pat<(X86Movddup (bc_v2f64 (loadv4f32 addr:$src))),
5234 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5235 def : Pat<(X86Movddup (bc_v2f64 (loadv2i64 addr:$src))),
5236 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5237 def : Pat<(X86Movddup (bc_v2f64
5238 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5239 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5240 }
5242 let Predicates = [UseAVX, OptForSize] in {
5243 def : Pat<(v2f64 (X86VBroadcast (loadf64 addr:$src))),
5244 (VMOVDDUPrm addr:$src)>;
5245 def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
5246 (VMOVDDUPrm addr:$src)>;
5247 }
5249 let Predicates = [UseSSE3] in {
5250 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5251 (MOVDDUPrm addr:$src)>;
5252 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
5253 (MOVDDUPrm addr:$src)>;
5254 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
5255 (MOVDDUPrm addr:$src)>;
5256 def : Pat<(X86Movddup (bc_v2f64
5257 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5258 (MOVDDUPrm addr:$src)>;
5259 }
5261 //===---------------------------------------------------------------------===//
5262 // SSE3 - Move Unaligned Integer
5263 //===---------------------------------------------------------------------===//
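5264 // LDDQU performs an unaligned 128-bit load, tuned for loads that split a cache line.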
5265 let SchedRW = [WriteLoad] in {
5266 let Predicates = [HasAVX] in {
5267 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5268 "vlddqu\t{$src, $dst|$dst, $src}",
5269 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
5270 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
5271 "vlddqu\t{$src, $dst|$dst, $src}",
5272 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>,
5273 VEX, VEX_L;
5274 }
5275 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5276 "lddqu\t{$src, $dst|$dst, $src}",
5277 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))],
5278 IIC_SSE_LDDQU>;
5279 } // SchedRW
5281 //===---------------------------------------------------------------------===//
5282 // SSE3 - Arithmetic
5283 //===---------------------------------------------------------------------===//
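5284 // ADDSUB subtracts in the even lanes and adds in the odd lanes (matched via X86Addsub below).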
5285 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
5286 X86MemOperand x86memop, OpndItins itins,
5287 PatFrag ld_frag, bit Is2Addr = 1> {
5288 def rr : I<0xD0, MRMSrcReg,
5289 (outs RC:$dst), (ins RC:$src1, RC:$src2),
5291 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5292 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5293 [(set RC:$dst, (Int RC:$src1, RC:$src2))], itins.rr>,
5294 Sched<[itins.Sched]>;
5295 def rm : I<0xD0, MRMSrcMem,
5296 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5298 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5299 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5300 [(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2)))], itins.rm>,
5301 Sched<[itins.Sched.Folded, ReadAfterLd]>;
5302 }
5304 let Predicates = [HasAVX] in {
5305 let ExeDomain = SSEPackedSingle in {
5306 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
5307 f128mem, SSE_ALU_F32P, loadv4f32, 0>, XD, VEX_4V;
5308 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
5309 f256mem, SSE_ALU_F32P, loadv8f32, 0>, XD, VEX_4V, VEX_L;
5310 }
5311 let ExeDomain = SSEPackedDouble in {
5312 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
5313 f128mem, SSE_ALU_F64P, loadv2f64, 0>, PD, VEX_4V;
5314 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
5315 f256mem, SSE_ALU_F64P, loadv4f64, 0>, PD, VEX_4V, VEX_L;
5316 }
5317 }
5318 let Constraints = "$src1 = $dst", Predicates = [UseSSE3] in {
5319 let ExeDomain = SSEPackedSingle in
5320 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
5321 f128mem, SSE_ALU_F32P, memopv4f32>, XD;
5322 let ExeDomain = SSEPackedDouble in
5323 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
5324 f128mem, SSE_ALU_F64P, memopv2f64>, PD;
5325 }
5327 // Patterns used to select 'addsub' instructions.
5328 let Predicates = [HasAVX] in {
5329 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 VR128:$rhs))),
5330 (VADDSUBPSrr VR128:$lhs, VR128:$rhs)>;
5331 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (loadv4f32 addr:$rhs))),
5332 (VADDSUBPSrm VR128:$lhs, f128mem:$rhs)>;
5333 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 VR128:$rhs))),
5334 (VADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
5335 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (loadv2f64 addr:$rhs))),
5336 (VADDSUBPDrm VR128:$lhs, f128mem:$rhs)>;
5338 def : Pat<(v8f32 (X86Addsub (v8f32 VR256:$lhs), (v8f32 VR256:$rhs))),
5339 (VADDSUBPSYrr VR256:$lhs, VR256:$rhs)>;
5340 def : Pat<(v8f32 (X86Addsub (v8f32 VR256:$lhs), (loadv8f32 addr:$rhs))),
5341 (VADDSUBPSYrm VR256:$lhs, f256mem:$rhs)>;
5342 def : Pat<(v4f64 (X86Addsub (v4f64 VR256:$lhs), (v4f64 VR256:$rhs))),
5343 (VADDSUBPDYrr VR256:$lhs, VR256:$rhs)>;
5344 def : Pat<(v4f64 (X86Addsub (v4f64 VR256:$lhs), (loadv4f64 addr:$rhs))),
5345 (VADDSUBPDYrm VR256:$lhs, f256mem:$rhs)>;
5346 }
5348 let Predicates = [UseSSE3] in {
5349 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 VR128:$rhs))),
5350 (ADDSUBPSrr VR128:$lhs, VR128:$rhs)>;
5351 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (memopv4f32 addr:$rhs))),
5352 (ADDSUBPSrm VR128:$lhs, f128mem:$rhs)>;
5353 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 VR128:$rhs))),
5354 (ADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
5355 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (memopv2f64 addr:$rhs))),
5356 (ADDSUBPDrm VR128:$lhs, f128mem:$rhs)>;
5357 }
5359 //===---------------------------------------------------------------------===//
5360 // SSE3 Instructions
5361 //===---------------------------------------------------------------------===//
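5363 // Horizontal ops: each result element combines a pair of adjacent source elements.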
5364 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
5365 X86MemOperand x86memop, SDNode OpNode, PatFrag ld_frag,
5366 bit Is2Addr = 1> {
5367 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
5368 !if(Is2Addr,
5369 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5370 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5371 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>,
5372 Sched<[WriteFAdd]>;
5374 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5375 !if(Is2Addr,
5376 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5377 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5378 [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))],
5379 IIC_SSE_HADDSUB_RM>, Sched<[WriteFAddLd, ReadAfterLd]>;
5380 }
5381 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
5382 X86MemOperand x86memop, SDNode OpNode, PatFrag ld_frag,
5383 bit Is2Addr = 1> {
5384 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
5385 !if(Is2Addr,
5386 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5387 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5388 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>,
5389 Sched<[WriteFAdd]>;
5391 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5392 !if(Is2Addr,
5393 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5394 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5395 [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))],
5396 IIC_SSE_HADDSUB_RM>, Sched<[WriteFAddLd, ReadAfterLd]>;
5397 }
5399 let Predicates = [HasAVX] in {
5400 let ExeDomain = SSEPackedSingle in {
5401 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
5402 X86fhadd, loadv4f32, 0>, VEX_4V;
5403 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
5404 X86fhsub, loadv4f32, 0>, VEX_4V;
5405 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
5406 X86fhadd, loadv8f32, 0>, VEX_4V, VEX_L;
5407 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
5408 X86fhsub, loadv8f32, 0>, VEX_4V, VEX_L;
5409 }
5410 let ExeDomain = SSEPackedDouble in {
5411 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
5412 X86fhadd, loadv2f64, 0>, VEX_4V;
5413 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
5414 X86fhsub, loadv2f64, 0>, VEX_4V;
5415 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
5416 X86fhadd, loadv4f64, 0>, VEX_4V, VEX_L;
5417 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
5418 X86fhsub, loadv4f64, 0>, VEX_4V, VEX_L;
5419 }
5420 }
5422 let Constraints = "$src1 = $dst" in {
5423 let ExeDomain = SSEPackedSingle in {
5424 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem, X86fhadd,
5425 memopv4f32>;
5426 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem, X86fhsub,
5427 memopv4f32>;
5428 }
5429 let ExeDomain = SSEPackedDouble in {
5430 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem, X86fhadd,
5431 memopv2f64>;
5432 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem, X86fhsub,
5433 memopv2f64>;
5434 }
5435 } // Constraints = "$src1 = $dst"
5437 //===---------------------------------------------------------------------===//
5438 // SSSE3 - Packed Absolute Instructions
5439 //===---------------------------------------------------------------------===//
5442 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
5443 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
5444 PatFrag ld_frag> {
5445 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
5446 (ins VR128:$src),
5447 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5448 [(set VR128:$dst, (IntId128 VR128:$src))], IIC_SSE_PABS_RR>,
5449 Sched<[WriteVecALU]>;
5451 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
5452 (ins i128mem:$src),
5453 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5454 [(set VR128:$dst,
5455 (IntId128
5456 (bitconvert (ld_frag addr:$src))))], IIC_SSE_PABS_RM>,
5457 Sched<[WriteVecALULd]>;
5458 }
5460 /// SS3I_unop_rm_int_y - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
5461 multiclass SS3I_unop_rm_int_y<bits<8> opc, string OpcodeStr,
5462 Intrinsic IntId256> {
5463 def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
5464 (ins VR256:$src),
5465 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5466 [(set VR256:$dst, (IntId256 VR256:$src))]>,
5467 Sched<[WriteVecALU]>;
5469 def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
5470 (ins i256mem:$src),
5471 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5472 [(set VR256:$dst,
5473 (IntId256
5474 (bitconvert (loadv4i64 addr:$src))))]>,
5475 Sched<[WriteVecALULd]>;
5476 }
5478 // Helper fragments to match sext vXi1 to vXiY.
5479 def v16i1sextv16i8 : PatLeaf<(v16i8 (X86pcmpgt (bc_v16i8 (v4i32 immAllZerosV)),
5480 VR128:$src))>;
5481 def v8i1sextv8i16 : PatLeaf<(v8i16 (X86vsrai VR128:$src, (i8 15)))>;
5482 def v4i1sextv4i32 : PatLeaf<(v4i32 (X86vsrai VR128:$src, (i8 31)))>;
5483 def v32i1sextv32i8 : PatLeaf<(v32i8 (X86pcmpgt (bc_v32i8 (v8i32 immAllZerosV)),
5484 VR256:$src))>;
5485 def v16i1sextv16i16: PatLeaf<(v16i16 (X86vsrai VR256:$src, (i8 15)))>;
5486 def v8i1sextv8i32 : PatLeaf<(v8i32 (X86vsrai VR256:$src, (i8 31)))>;
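5487 // The xor/add patterns below match the abs idiom: abs(x) == (x + sext(x < 0)) ^ sext(x < 0).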
5488 let Predicates = [HasAVX] in {
5489 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", int_x86_ssse3_pabs_b_128,
5490 loadv2i64>, VEX;
5491 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", int_x86_ssse3_pabs_w_128,
5492 loadv2i64>, VEX;
5493 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", int_x86_ssse3_pabs_d_128,
5494 loadv2i64>, VEX;
5496 def : Pat<(xor
5497 (bc_v2i64 (v16i1sextv16i8)),
5498 (bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))),
5499 (VPABSBrr128 VR128:$src)>;
5500 def : Pat<(xor
5501 (bc_v2i64 (v8i1sextv8i16)),
5502 (bc_v2i64 (add (v8i16 VR128:$src), (v8i1sextv8i16)))),
5503 (VPABSWrr128 VR128:$src)>;
5504 def : Pat<(xor
5505 (bc_v2i64 (v4i1sextv4i32)),
5506 (bc_v2i64 (add (v4i32 VR128:$src), (v4i1sextv4i32)))),
5507 (VPABSDrr128 VR128:$src)>;
5508 }
5510 let Predicates = [HasAVX2] in {
5511 defm VPABSB : SS3I_unop_rm_int_y<0x1C, "vpabsb",
5512 int_x86_avx2_pabs_b>, VEX, VEX_L;
5513 defm VPABSW : SS3I_unop_rm_int_y<0x1D, "vpabsw",
5514 int_x86_avx2_pabs_w>, VEX, VEX_L;
5515 defm VPABSD : SS3I_unop_rm_int_y<0x1E, "vpabsd",
5516 int_x86_avx2_pabs_d>, VEX, VEX_L;
5518 def : Pat<(xor
5519 (bc_v4i64 (v32i1sextv32i8)),
5520 (bc_v4i64 (add (v32i8 VR256:$src), (v32i1sextv32i8)))),
5521 (VPABSBrr256 VR256:$src)>;
5522 def : Pat<(xor
5523 (bc_v4i64 (v16i1sextv16i16)),
5524 (bc_v4i64 (add (v16i16 VR256:$src), (v16i1sextv16i16)))),
5525 (VPABSWrr256 VR256:$src)>;
5526 def : Pat<(xor
5527 (bc_v4i64 (v8i1sextv8i32)),
5528 (bc_v4i64 (add (v8i32 VR256:$src), (v8i1sextv8i32)))),
5529 (VPABSDrr256 VR256:$src)>;
5530 }
5532 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", int_x86_ssse3_pabs_b_128,
5533 memopv2i64>;
5534 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", int_x86_ssse3_pabs_w_128,
5535 memopv2i64>;
5536 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", int_x86_ssse3_pabs_d_128,
5537 memopv2i64>;
5539 let Predicates = [HasSSSE3] in {
5540 def : Pat<(xor
5541 (bc_v2i64 (v16i1sextv16i8)),
5542 (bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))),
5543 (PABSBrr128 VR128:$src)>;
5544 def : Pat<(xor
5545 (bc_v2i64 (v8i1sextv8i16)),
5546 (bc_v2i64 (add (v8i16 VR128:$src), (v8i1sextv8i16)))),
5547 (PABSWrr128 VR128:$src)>;
5548 def : Pat<(xor
5549 (bc_v2i64 (v4i1sextv4i32)),
5550 (bc_v2i64 (add (v4i32 VR128:$src), (v4i1sextv4i32)))),
5551 (PABSDrr128 VR128:$src)>;
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Binary Operator Instructions
//===---------------------------------------------------------------------===//

let Sched = WriteVecALU in {
def SSE_PHADDSUBD : OpndItins<
  IIC_SSE_PHADDSUBD_RR, IIC_SSE_PHADDSUBD_RM
>;
def SSE_PHADDSUBSW : OpndItins<
  IIC_SSE_PHADDSUBSW_RR, IIC_SSE_PHADDSUBSW_RM
>;
def SSE_PHADDSUBW : OpndItins<
  IIC_SSE_PHADDSUBW_RR, IIC_SSE_PHADDSUBW_RM
>;
}
let Sched = WriteShuffle in
def SSE_PSHUFB : OpndItins<
  IIC_SSE_PSHUFB_RR, IIC_SSE_PSHUFB_RM
>;
let Sched = WriteVecALU in
def SSE_PSIGN : OpndItins<
  IIC_SSE_PSIGN_RR, IIC_SSE_PSIGN_RM
>;
let Sched = WriteVecIMul in
def SSE_PMULHRSW : OpndItins<
  IIC_SSE_PMULHRSW, IIC_SSE_PMULHRSW
>;
/// SS3I_binop_rm - Simple SSSE3 bin op
multiclass SS3I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                         ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                         X86MemOperand x86memop, OpndItins itins,
                         bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS38I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
       Sched<[itins.Sched]>;
  def rm : SS38I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst,
         (OpVT (OpNode RC:$src1,
                (bitconvert (memop_frag addr:$src2)))))], itins.rm>,
       Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

/// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
                             Intrinsic IntId128, OpndItins itins,
                             PatFrag ld_frag, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       Sched<[itins.Sched]>;
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (ld_frag addr:$src2))))]>,
       Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

multiclass SS3I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
                               Intrinsic IntId256,
                               X86FoldableSchedWrite Sched> {
  let isCommutable = 1 in
  def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
       (ins VR256:$src1, VR256:$src2),
       !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>,
       Sched<[Sched]>;
  def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
       (ins VR256:$src1, i256mem:$src2),
       !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       [(set VR256:$dst,
         (IntId256 VR256:$src1, (bitconvert (loadv4i64 addr:$src2))))]>,
       Sched<[Sched.Folded, ReadAfterLd]>;
}
let ImmT = NoImm, Predicates = [HasAVX] in {
let isCommutable = 0 in {
  defm VPHADDW    : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v8i16, VR128,
                                  loadv2i64, i128mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V;
  defm VPHADDD    : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v4i32, VR128,
                                  loadv2i64, i128mem,
                                  SSE_PHADDSUBD, 0>, VEX_4V;
  defm VPHSUBW    : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v8i16, VR128,
                                  loadv2i64, i128mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V;
  defm VPHSUBD    : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v4i32, VR128,
                                  loadv2i64, i128mem,
                                  SSE_PHADDSUBD, 0>, VEX_4V;
  defm VPSIGNB    : SS3I_binop_rm<0x08, "vpsignb", X86psign, v16i8, VR128,
                                  loadv2i64, i128mem,
                                  SSE_PSIGN, 0>, VEX_4V;
  defm VPSIGNW    : SS3I_binop_rm<0x09, "vpsignw", X86psign, v8i16, VR128,
                                  loadv2i64, i128mem,
                                  SSE_PSIGN, 0>, VEX_4V;
  defm VPSIGND    : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v4i32, VR128,
                                  loadv2i64, i128mem,
                                  SSE_PSIGN, 0>, VEX_4V;
  defm VPSHUFB    : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v16i8, VR128,
                                  loadv2i64, i128mem,
                                  SSE_PSHUFB, 0>, VEX_4V;
  defm VPHADDSW   : SS3I_binop_rm_int<0x03, "vphaddsw",
                                      int_x86_ssse3_phadd_sw_128,
                                      SSE_PHADDSUBSW, loadv2i64, 0>, VEX_4V;
  defm VPHSUBSW   : SS3I_binop_rm_int<0x07, "vphsubsw",
                                      int_x86_ssse3_phsub_sw_128,
                                      SSE_PHADDSUBSW, loadv2i64, 0>, VEX_4V;
  defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw",
                                      int_x86_ssse3_pmadd_ub_sw_128,
                                      SSE_PMADD, loadv2i64, 0>, VEX_4V;
}
defm VPMULHRSW    : SS3I_binop_rm_int<0x0B, "vpmulhrsw",
                                      int_x86_ssse3_pmul_hr_sw_128,
                                      SSE_PMULHRSW, loadv2i64, 0>, VEX_4V;
}

let ImmT = NoImm, Predicates = [HasAVX2] in {
let isCommutable = 0 in {
  defm VPHADDWY   : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v16i16, VR256,
                                  loadv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
  defm VPHADDDY   : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v8i32, VR256,
                                  loadv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
  defm VPHSUBWY   : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v16i16, VR256,
                                  loadv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
  defm VPHSUBDY   : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, VR256,
                                  loadv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
  defm VPSIGNBY   : SS3I_binop_rm<0x08, "vpsignb", X86psign, v32i8, VR256,
                                  loadv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
  defm VPSIGNWY   : SS3I_binop_rm<0x09, "vpsignw", X86psign, v16i16, VR256,
                                  loadv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
  defm VPSIGNDY   : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v8i32, VR256,
                                  loadv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
  defm VPSHUFBY   : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, VR256,
                                  loadv4i64, i256mem,
                                  SSE_PSHUFB, 0>, VEX_4V, VEX_L;
  defm VPHADDSW   : SS3I_binop_rm_int_y<0x03, "vphaddsw",
                                        int_x86_avx2_phadd_sw,
                                        WriteVecALU>, VEX_4V, VEX_L;
  defm VPHSUBSW   : SS3I_binop_rm_int_y<0x07, "vphsubsw",
                                        int_x86_avx2_phsub_sw,
                                        WriteVecALU>, VEX_4V, VEX_L;
  defm VPMADDUBSW : SS3I_binop_rm_int_y<0x04, "vpmaddubsw",
                                        int_x86_avx2_pmadd_ub_sw,
                                        WriteVecIMul>, VEX_4V, VEX_L;
}
defm VPMULHRSW    : SS3I_binop_rm_int_y<0x0B, "vpmulhrsw",
                                        int_x86_avx2_pmul_hr_sw,
                                        WriteVecIMul>, VEX_4V, VEX_L;
}
// None of these have i8 immediate fields.
let ImmT = NoImm, Constraints = "$src1 = $dst" in {
let isCommutable = 0 in {
  defm PHADDW    : SS3I_binop_rm<0x01, "phaddw", X86hadd, v8i16, VR128,
                                 memopv2i64, i128mem, SSE_PHADDSUBW>;
  defm PHADDD    : SS3I_binop_rm<0x02, "phaddd", X86hadd, v4i32, VR128,
                                 memopv2i64, i128mem, SSE_PHADDSUBD>;
  defm PHSUBW    : SS3I_binop_rm<0x05, "phsubw", X86hsub, v8i16, VR128,
                                 memopv2i64, i128mem, SSE_PHADDSUBW>;
  defm PHSUBD    : SS3I_binop_rm<0x06, "phsubd", X86hsub, v4i32, VR128,
                                 memopv2i64, i128mem, SSE_PHADDSUBD>;
  defm PSIGNB    : SS3I_binop_rm<0x08, "psignb", X86psign, v16i8, VR128,
                                 memopv2i64, i128mem, SSE_PSIGN>;
  defm PSIGNW    : SS3I_binop_rm<0x09, "psignw", X86psign, v8i16, VR128,
                                 memopv2i64, i128mem, SSE_PSIGN>;
  defm PSIGND    : SS3I_binop_rm<0x0A, "psignd", X86psign, v4i32, VR128,
                                 memopv2i64, i128mem, SSE_PSIGN>;
  defm PSHUFB    : SS3I_binop_rm<0x00, "pshufb", X86pshufb, v16i8, VR128,
                                 memopv2i64, i128mem, SSE_PSHUFB>;
  defm PHADDSW   : SS3I_binop_rm_int<0x03, "phaddsw",
                                     int_x86_ssse3_phadd_sw_128,
                                     SSE_PHADDSUBSW, memopv2i64>;
  defm PHSUBSW   : SS3I_binop_rm_int<0x07, "phsubsw",
                                     int_x86_ssse3_phsub_sw_128,
                                     SSE_PHADDSUBSW, memopv2i64>;
  defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw",
                                     int_x86_ssse3_pmadd_ub_sw_128,
                                     SSE_PMADD, memopv2i64>;
}
defm PMULHRSW    : SS3I_binop_rm_int<0x0B, "pmulhrsw",
                                     int_x86_ssse3_pmul_hr_sw_128,
                                     SSE_PMULHRSW, memopv2i64>;
}
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Align Instruction Patterns
//===---------------------------------------------------------------------===//

multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
  let hasSideEffects = 0 in {
  def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, VR128:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [], IIC_SSE_PALIGNRR>, Sched<[WriteShuffle]>;
  let mayLoad = 1 in
  def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [], IIC_SSE_PALIGNRM>, Sched<[WriteShuffleLd, ReadAfterLd]>;
  }
}

multiclass ssse3_palignr_y<string asm, bit Is2Addr = 1> {
  let hasSideEffects = 0 in {
  def R256rr : SS3AI<0x0F, MRMSrcReg, (outs VR256:$dst),
      (ins VR256:$src1, VR256:$src2, u8imm:$src3),
      !strconcat(asm,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
      []>, Sched<[WriteShuffle]>;
  let mayLoad = 1 in
  def R256rm : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst),
      (ins VR256:$src1, i256mem:$src2, u8imm:$src3),
      !strconcat(asm,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
      []>, Sched<[WriteShuffleLd, ReadAfterLd]>;
  }
}

let Predicates = [HasAVX] in
  defm VPALIGN : ssse3_palignr<"vpalignr", 0>, VEX_4V;
let Predicates = [HasAVX2] in
  defm VPALIGN : ssse3_palignr_y<"vpalignr", 0>, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst", Predicates = [UseSSSE3] in
  defm PALIGN : ssse3_palignr<"palignr">;
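
// PALIGNR concatenates {$src1, $src2} ($src1 in the high half), shifts the
// composite right by $src3 bytes and keeps the low 16 bytes; the 256-bit form
// does this independently within each 128-bit lane.  The patterns below
// select the rr forms for every legal vector element type.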
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
def : Pat<(v8i32 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPALIGNR256rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v8f32 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPALIGNR256rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPALIGNR256rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPALIGNR256rr VR256:$src1, VR256:$src2, imm:$imm)>;
}

let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
def : Pat<(v4i32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v4f32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v8i16 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v16i8 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
}

let Predicates = [UseSSSE3] in {
def : Pat<(v4i32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v4f32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v8i16 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v16i8 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
}
//===---------------------------------------------------------------------===//
// SSSE3 - Thread synchronization
//===---------------------------------------------------------------------===//

let SchedRW = [WriteSystem] in {
let usesCustomInserter = 1 in {
def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
                [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>,
                Requires<[HasSSE3]>;
}

let Uses = [EAX, ECX, EDX] in
def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", [], IIC_SSE_MONITOR>,
                 TB, Requires<[HasSSE3]>;
let Uses = [ECX, EAX] in
def MWAITrr   : I<0x01, MRM_C9, (outs), (ins), "mwait",
                  [(int_x86_sse3_mwait ECX, EAX)], IIC_SSE_MWAIT>,
                TB, Requires<[HasSSE3]>;
} // SchedRW
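
// MONITOR implicitly uses EAX/ECX/EDX and MWAIT uses ECX/EAX; the aliases
// below let the assembler accept the explicitly spelled register forms in
// both 32-bit and 64-bit mode.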
def : InstAlias<"mwait\t{%eax, %ecx|ecx, eax}", (MWAITrr)>, Requires<[Not64BitMode]>;
def : InstAlias<"mwait\t{%rax, %rcx|rcx, rax}", (MWAITrr)>, Requires<[In64BitMode]>;

def : InstAlias<"monitor\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORrrr)>,
      Requires<[Not64BitMode]>;
def : InstAlias<"monitor\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORrrr)>,
      Requires<[In64BitMode]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Move with Sign/Zero Extend
//===----------------------------------------------------------------------===//

multiclass SS41I_pmovx_rrrm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
                            RegisterClass OutRC, RegisterClass InRC,
                            OpndItins itins> {
  def rr : SS48I<opc, MRMSrcReg, (outs OutRC:$dst), (ins InRC:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [], itins.rr>,
                 Sched<[itins.Sched]>;

  def rm : SS48I<opc, MRMSrcMem, (outs OutRC:$dst), (ins MemOp:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [],
                 itins.rm>, Sched<[itins.Sched.Folded]>;
}

multiclass SS41I_pmovx_rm_all<bits<8> opc, string OpcodeStr,
                              X86MemOperand MemOp, X86MemOperand MemYOp,
                              OpndItins SSEItins, OpndItins AVXItins,
                              OpndItins AVX2Itins> {
  defm NAME : SS41I_pmovx_rrrm<opc, OpcodeStr, MemOp, VR128, VR128, SSEItins>;
  let Predicates = [HasAVX, NoVLX] in
    defm V#NAME : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemOp,
                                   VR128, VR128, AVXItins>, VEX;
  let Predicates = [HasAVX2, NoVLX] in
    defm V#NAME#Y : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemYOp,
                                     VR256, VR128, AVX2Itins>, VEX, VEX_L;
}

multiclass SS41I_pmovx_rm<bits<8> opc, string OpcodeStr,
                          X86MemOperand MemOp, X86MemOperand MemYOp> {
  defm PMOVSX#NAME : SS41I_pmovx_rm_all<opc, !strconcat("pmovsx", OpcodeStr),
                                        MemOp, MemYOp,
                                        SSE_INTALU_ITINS_SHUFF_P,
                                        DEFAULT_ITINS_SHUFFLESCHED,
                                        DEFAULT_ITINS_SHUFFLESCHED>;
  defm PMOVZX#NAME : SS41I_pmovx_rm_all<!add(opc, 0x10),
                                        !strconcat("pmovzx", OpcodeStr),
                                        MemOp, MemYOp,
                                        SSE_INTALU_ITINS_SHUFF_P,
                                        DEFAULT_ITINS_SHUFFLESCHED,
                                        DEFAULT_ITINS_SHUFFLESCHED>;
}
defm BW : SS41I_pmovx_rm<0x20, "bw", i64mem, i128mem>;
defm WD : SS41I_pmovx_rm<0x23, "wd", i64mem, i128mem>;
defm DQ : SS41I_pmovx_rm<0x25, "dq", i64mem, i128mem>;

defm BD : SS41I_pmovx_rm<0x21, "bd", i32mem, i64mem>;
defm WQ : SS41I_pmovx_rm<0x24, "wq", i32mem, i64mem>;

defm BQ : SS41I_pmovx_rm<0x22, "bq", i16mem, i32mem>;
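
// Each defm above expands, via NAME concatenation, into the full matrix of
// forms: e.g. "BW" at opcode 0x20 yields PMOVSXBW{rr,rm}, VPMOVSXBW{rr,rm}
// and VPMOVSXBWY{rr,rm}, while !add(opc, 0x10) gives the matching PMOVZXBW
// forms at opcode 0x30.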
multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy, SDNode ExtOp> {
  // Register-Register patterns
  def : Pat<(v16i16 (ExtOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BWYrr) VR128:$src)>;
  def : Pat<(v8i32 (ExtOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BDYrr) VR128:$src)>;
  def : Pat<(v4i64 (ExtOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BQYrr) VR128:$src)>;

  def : Pat<(v8i32 (ExtOp (v8i16 VR128:$src))),
            (!cast<I>(OpcPrefix#WDYrr) VR128:$src)>;
  def : Pat<(v4i64 (ExtOp (v8i16 VR128:$src))),
            (!cast<I>(OpcPrefix#WQYrr) VR128:$src)>;

  def : Pat<(v4i64 (ExtOp (v4i32 VR128:$src))),
            (!cast<I>(OpcPrefix#DQYrr) VR128:$src)>;

  // On AVX2, we also support 256bit inputs.
  def : Pat<(v16i16 (ExtOp (v32i8 VR256:$src))),
            (!cast<I>(OpcPrefix#BWYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
  def : Pat<(v8i32 (ExtOp (v32i8 VR256:$src))),
            (!cast<I>(OpcPrefix#BDYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
  def : Pat<(v4i64 (ExtOp (v32i8 VR256:$src))),
            (!cast<I>(OpcPrefix#BQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;

  def : Pat<(v8i32 (ExtOp (v16i16 VR256:$src))),
            (!cast<I>(OpcPrefix#WDYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
  def : Pat<(v4i64 (ExtOp (v16i16 VR256:$src))),
            (!cast<I>(OpcPrefix#WQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;

  def : Pat<(v4i64 (ExtOp (v8i32 VR256:$src))),
            (!cast<I>(OpcPrefix#DQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
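
  // The 256-bit-input patterns above use only the low 128 bits of the wide
  // source, so they extract sub_xmm and reuse the 128-bit-input forms.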

  // Simple Register-Memory patterns
  def : Pat<(v16i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
  def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
  def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;

  def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
            (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
  def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
            (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;

  def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
            (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;

  // AVX2 Register-Memory patterns
  def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
  def : Pat<(v16i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
  def : Pat<(v16i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
  def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;

  def : Pat<(v8i32 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
  def : Pat<(v8i32 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
  def : Pat<(v8i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
  def : Pat<(v8i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;

  def : Pat<(v4i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
            (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
            (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;

  def : Pat<(v8i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
  def : Pat<(v8i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
  def : Pat<(v8i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
  def : Pat<(v8i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;

  def : Pat<(v4i64 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;

  def : Pat<(v4i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
}

let Predicates = [HasAVX2, NoVLX] in {
  defm : SS41I_pmovx_avx2_patterns<"VPMOVSX", "s", X86vsext>;
  defm : SS41I_pmovx_avx2_patterns<"VPMOVZX", "z", X86vzext>;
}
// SSE4.1/AVX patterns.
multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
                                SDNode ExtOp, PatFrag ExtLoad16> {
  def : Pat<(v8i16 (ExtOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BWrr) VR128:$src)>;
  def : Pat<(v4i32 (ExtOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BDrr) VR128:$src)>;
  def : Pat<(v2i64 (ExtOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BQrr) VR128:$src)>;

  def : Pat<(v4i32 (ExtOp (v8i16 VR128:$src))),
            (!cast<I>(OpcPrefix#WDrr) VR128:$src)>;
  def : Pat<(v2i64 (ExtOp (v8i16 VR128:$src))),
            (!cast<I>(OpcPrefix#WQrr) VR128:$src)>;

  def : Pat<(v2i64 (ExtOp (v4i32 VR128:$src))),
            (!cast<I>(OpcPrefix#DQrr) VR128:$src)>;

  def : Pat<(v8i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
  def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
  def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BQrm) addr:$src)>;

  def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
  def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
            (!cast<I>(OpcPrefix#WQrm) addr:$src)>;

  def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;

  def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
  def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
  def : Pat<(v8i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
  def : Pat<(v8i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
  def : Pat<(v8i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;

  def : Pat<(v4i32 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
            (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
            (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BDrm) addr:$src)>;

  def : Pat<(v2i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (ExtLoad16 addr:$src)))))),
            (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
            (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BQrm) addr:$src)>;

  def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;

  def : Pat<(v2i64 (ExtOp (bc_v8i16 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
            (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (v8i16 (vzmovl_v4i32 addr:$src)))),
            (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WQrm) addr:$src)>;

  def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
}

let Predicates = [HasAVX, NoVLX] in {
  defm : SS41I_pmovx_patterns<"VPMOVSX", "s", X86vsext, extloadi32i16>;
  defm : SS41I_pmovx_patterns<"VPMOVZX", "z", X86vzext, loadi16_anyext>;
}

let Predicates = [UseSSE41] in {
  defm : SS41I_pmovx_patterns<"PMOVSX", "s", X86vsext, extloadi32i16>;
  defm : SS41I_pmovx_patterns<"PMOVZX", "z", X86vzext, loadi16_anyext>;
}
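
// Note the ExtLoad16 parameter: the BQ forms read only 16 bits from memory,
// and the right fragment differs by extension kind, extloadi32i16 for the
// sign-extending patterns versus loadi16_anyext for the zero-extending ones.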
//===----------------------------------------------------------------------===//
// SSE4.1 - Extract Instructions
//===----------------------------------------------------------------------===//

/// SS41I_extract8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
                 (ins VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32orGR64:$dst, (X86pextrb (v16i8 VR128:$src1),
                                                   imm:$src2))]>,
                  Sched<[WriteShuffle]>;
  let hasSideEffects = 0, mayStore = 1,
      SchedRW = [WriteShuffleLd, WriteRMW] in
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i8mem:$dst, VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (i8 (trunc (assertzext (X86pextrb (v16i8 VR128:$src1),
                                                 imm:$src2)))), addr:$dst)]>;
}

let Predicates = [HasAVX, NoBWI] in
  defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;

defm PEXTRB : SS41I_extract8<0x14, "pextrb">;

/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
  def rr_REV : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
                   (ins VR128:$src1, u8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   []>, Sched<[WriteShuffle]>;

  let hasSideEffects = 0, mayStore = 1,
      SchedRW = [WriteShuffleLd, WriteRMW] in
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i16mem:$dst, VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (i16 (trunc (assertzext (X86pextrw (v8i16 VR128:$src1),
                                                  imm:$src2)))), addr:$dst)]>;
}

let Predicates = [HasAVX, NoBWI] in
  defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;

defm PEXTRW : SS41I_extract16<0x15, "pextrw">;

/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst,
                  (extractelt (v4i32 VR128:$src1), imm:$src2))]>,
                  Sched<[WriteShuffle]>;
  let SchedRW = [WriteShuffleLd, WriteRMW] in
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i32mem:$dst, VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
                          addr:$dst)]>;
}

let Predicates = [HasAVX, NoDQI] in
  defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;

defm PEXTRD : SS41I_extract32<0x16, "pextrd">;

/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
                 (ins VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR64:$dst,
                  (extractelt (v2i64 VR128:$src1), imm:$src2))]>,
                  Sched<[WriteShuffle]>, REX_W;
  let SchedRW = [WriteShuffleLd, WriteRMW] in
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i64mem:$dst, VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
                          addr:$dst)]>, REX_W;
}

let Predicates = [HasAVX, NoDQI] in
  defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;

defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
/// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
/// destination
multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr,
                            OpndItins itins = DEFAULT_ITINS> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
                 (ins VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32orGR64:$dst,
                    (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))],
                    itins.rr>, Sched<[WriteFBlend]>;
  let SchedRW = [WriteFBlendLd, WriteRMW] in
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins f32mem:$dst, VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
                          addr:$dst)], itins.rm>;
}

let ExeDomain = SSEPackedSingle in {
  let Predicates = [UseAVX] in
    defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
  defm EXTRACTPS   : SS41I_extractf32<0x17, "extractps", SSE_EXTRACT_ITINS>;
}

// Also match an EXTRACTPS store when the store is done as f32 instead of i32.
def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
                                              imm:$src2))),
                 addr:$dst),
          (VEXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
          Requires<[HasAVX]>;
def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
                                              imm:$src2))),
                 addr:$dst),
          (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
          Requires<[UseSSE41]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Insert Instructions
//===----------------------------------------------------------------------===//

multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR32orGR64:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86pinsrb VR128:$src1, GR32orGR64:$src2, imm:$src3))]>,
      Sched<[WriteShuffle]>;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i8mem:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
                   imm:$src3))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}

let Predicates = [HasAVX, NoBWI] in
  defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PINSRB : SS41I_insert8<0x20, "pinsrb">;

multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR32:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
      Sched<[WriteShuffle]>;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i32mem:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
                          imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}

let Predicates = [HasAVX, NoDQI] in
  defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PINSRD : SS41I_insert32<0x22, "pinsrd">;

multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR64:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
      Sched<[WriteShuffle]>;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i64mem:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
                          imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}

let Predicates = [HasAVX, NoDQI] in
  defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
let Constraints = "$src1 = $dst" in
  defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
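
// Note how the rm forms fold a scalar load directly into the insert:
// pinsrb any-extends an i8 load via extloadi8, while pinsrd and pinsrq use
// plain loadi32/loadi64 fragments.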
// insertps has a few different modes: the first two patterns below are
// optimized inserts that won't zero arbitrary elements in the destination
// vector, while the intrinsic form that follows can additionally zero
// arbitrary elements in the target vector.
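// In the insertps immediate, bits [7:6] select the source element, bits
// [5:4] select the destination lane, and bits [3:0] form a zero mask of
// destination lanes to clear.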
multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1,
                           OpndItins itins = DEFAULT_ITINS> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, VR128:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86insertps VR128:$src1, VR128:$src2, imm:$src3))], itins.rr>,
      Sched<[WriteFShuffle]>;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, f32mem:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86insertps VR128:$src1,
                     (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
                     imm:$src3))], itins.rm>,
      Sched<[WriteFShuffleLd, ReadAfterLd]>;
}
let ExeDomain = SSEPackedSingle in {
  let Predicates = [UseAVX] in
    defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
  let Constraints = "$src1 = $dst" in
    defm INSERTPS : SS41I_insertf32<0x21, "insertps", 1, SSE_INSERT_ITINS>;
}

let Predicates = [UseSSE41] in {
  // If we're inserting an element from a load or a null pshuf of a load,
  // fold the load into the insertps instruction.
  def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1), (X86PShufd (v4f32
                       (scalar_to_vector (loadf32 addr:$src2))), (i8 0)),
                   imm:$src3)),
            (INSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
  def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1), (X86PShufd
                      (loadv4f32 addr:$src2), (i8 0)), imm:$src3)),
            (INSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
}

let Predicates = [UseAVX] in {
  // If we're inserting an element from a vbroadcast of a load, fold the
  // load into the X86insertps instruction.
  def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1),
                  (X86VBroadcast (loadf32 addr:$src2)), imm:$src3)),
            (VINSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
  def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1),
                  (X86VBroadcast (loadv4f32 addr:$src2)), imm:$src3)),
            (VINSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Round Instructions
//===----------------------------------------------------------------------===//

multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
                            X86MemOperand x86memop, RegisterClass RC,
                            PatFrag mem_frag32, PatFrag mem_frag64,
                            Intrinsic V4F32Int, Intrinsic V2F64Int> {
let ExeDomain = SSEPackedSingle in {
  // Vector intrinsic operation, reg
  def PSr : SS4AIi8<opcps, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32u8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))],
                    IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAdd]>;

  // Vector intrinsic operation, mem
  def PSm : SS4AIi8<opcps, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32u8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                          (V4F32Int (mem_frag32 addr:$src1), imm:$src2))],
                    IIC_SSE_ROUNDPS_MEM>, Sched<[WriteFAddLd]>;
} // ExeDomain = SSEPackedSingle

let ExeDomain = SSEPackedDouble in {
  // Vector intrinsic operation, reg
  def PDr : SS4AIi8<opcpd, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32u8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))],
                    IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAdd]>;

  // Vector intrinsic operation, mem
  def PDm : SS4AIi8<opcpd, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32u8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                          (V2F64Int (mem_frag64 addr:$src1), imm:$src2))],
                    IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAddLd]>;
} // ExeDomain = SSEPackedDouble
}
multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
                             string OpcodeStr,
                             Intrinsic F32Int,
                             Intrinsic F64Int, bit Is2Addr = 1> {
let ExeDomain = GenericDomain in {
  // Operation, reg.
  let hasSideEffects = 0 in
  def SSr : SS4AIi8<opcss, MRMSrcReg,
      (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, i32u8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      []>, Sched<[WriteFAdd]>;

  // Intrinsic operation, reg.
  let isCodeGenOnly = 1 in
  def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32u8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
      Sched<[WriteFAdd]>;

  // Intrinsic operation, mem.
  def SSm : SS4AIi8<opcss, MRMSrcMem,
      (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32u8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
            (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
      Sched<[WriteFAddLd, ReadAfterLd]>;

  // Operation, reg.
  let hasSideEffects = 0 in
  def SDr : SS4AIi8<opcsd, MRMSrcReg,
      (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, i32u8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      []>, Sched<[WriteFAdd]>;

  // Intrinsic operation, reg.
  let isCodeGenOnly = 1 in
  def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32u8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
      Sched<[WriteFAdd]>;

  // Intrinsic operation, mem.
  def SDm : SS4AIi8<opcsd, MRMSrcMem,
      (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32u8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
            (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
      Sched<[WriteFAddLd, ReadAfterLd]>;
} // ExeDomain = GenericDomain
}
// FP round - roundss, roundps, roundsd, roundpd
let Predicates = [HasAVX] in {
  // Intrinsic form
  defm VROUND  : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
                                  loadv4f32, loadv2f64,
                                  int_x86_sse41_round_ps,
                                  int_x86_sse41_round_pd>, VEX;
  defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
                                  loadv8f32, loadv4f64,
                                  int_x86_avx_round_ps_256,
                                  int_x86_avx_round_pd_256>, VEX, VEX_L;
  defm VROUND  : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
                                   int_x86_sse41_round_ss,
                                   int_x86_sse41_round_sd, 0>, VEX_4V, VEX_LIG;
}
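
// In the rounding-control immediates matched below, bits [1:0] select the
// mode (00 = nearest, 01 = down, 10 = up, 11 = truncate), bit 2 selects the
// mode from MXCSR.RC instead, and bit 3 suppresses the precision (inexact)
// exception.  Hence 0x9 = floor, 0xA = ceil, 0xB = trunc, 0xC = nearbyint
// (current mode, inexact suppressed) and 0x4 = rint (current mode, inexact
// reported).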
let Predicates = [UseAVX] in {
  def : Pat<(f32 (ffloor FR32:$src)),
            (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x9))>;
  def : Pat<(f64 (ffloor FR64:$src)),
            (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x9))>;
  def : Pat<(f32 (fnearbyint FR32:$src)),
            (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
  def : Pat<(f64 (fnearbyint FR64:$src)),
            (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
  def : Pat<(f32 (fceil FR32:$src)),
            (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xA))>;
  def : Pat<(f64 (fceil FR64:$src)),
            (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xA))>;
  def : Pat<(f32 (frint FR32:$src)),
            (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
  def : Pat<(f64 (frint FR64:$src)),
            (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
  def : Pat<(f32 (ftrunc FR32:$src)),
            (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xB))>;
  def : Pat<(f64 (ftrunc FR64:$src)),
            (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xB))>;
}
let Predicates = [HasAVX] in {
  def : Pat<(v4f32 (ffloor VR128:$src)),
            (VROUNDPSr VR128:$src, (i32 0x9))>;
  def : Pat<(v4f32 (fnearbyint VR128:$src)),
            (VROUNDPSr VR128:$src, (i32 0xC))>;
  def : Pat<(v4f32 (fceil VR128:$src)),
            (VROUNDPSr VR128:$src, (i32 0xA))>;
  def : Pat<(v4f32 (frint VR128:$src)),
            (VROUNDPSr VR128:$src, (i32 0x4))>;
  def : Pat<(v4f32 (ftrunc VR128:$src)),
            (VROUNDPSr VR128:$src, (i32 0xB))>;

  def : Pat<(v2f64 (ffloor VR128:$src)),
            (VROUNDPDr VR128:$src, (i32 0x9))>;
  def : Pat<(v2f64 (fnearbyint VR128:$src)),
            (VROUNDPDr VR128:$src, (i32 0xC))>;
  def : Pat<(v2f64 (fceil VR128:$src)),
            (VROUNDPDr VR128:$src, (i32 0xA))>;
  def : Pat<(v2f64 (frint VR128:$src)),
            (VROUNDPDr VR128:$src, (i32 0x4))>;
  def : Pat<(v2f64 (ftrunc VR128:$src)),
            (VROUNDPDr VR128:$src, (i32 0xB))>;

  def : Pat<(v8f32 (ffloor VR256:$src)),
            (VROUNDYPSr VR256:$src, (i32 0x9))>;
  def : Pat<(v8f32 (fnearbyint VR256:$src)),
            (VROUNDYPSr VR256:$src, (i32 0xC))>;
  def : Pat<(v8f32 (fceil VR256:$src)),
            (VROUNDYPSr VR256:$src, (i32 0xA))>;
  def : Pat<(v8f32 (frint VR256:$src)),
            (VROUNDYPSr VR256:$src, (i32 0x4))>;
  def : Pat<(v8f32 (ftrunc VR256:$src)),
            (VROUNDYPSr VR256:$src, (i32 0xB))>;

  def : Pat<(v4f64 (ffloor VR256:$src)),
            (VROUNDYPDr VR256:$src, (i32 0x9))>;
  def : Pat<(v4f64 (fnearbyint VR256:$src)),
            (VROUNDYPDr VR256:$src, (i32 0xC))>;
  def : Pat<(v4f64 (fceil VR256:$src)),
            (VROUNDYPDr VR256:$src, (i32 0xA))>;
  def : Pat<(v4f64 (frint VR256:$src)),
            (VROUNDYPDr VR256:$src, (i32 0x4))>;
  def : Pat<(v4f64 (ftrunc VR256:$src)),
            (VROUNDYPDr VR256:$src, (i32 0xB))>;
}
defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
                              memopv4f32, memopv2f64,
                              int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
let Constraints = "$src1 = $dst" in
defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
                               int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
let Predicates = [UseSSE41] in {
  def : Pat<(f32 (ffloor FR32:$src)),
            (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x9))>;
  def : Pat<(f64 (ffloor FR64:$src)),
            (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x9))>;
  def : Pat<(f32 (fnearbyint FR32:$src)),
            (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
  def : Pat<(f64 (fnearbyint FR64:$src)),
            (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
  def : Pat<(f32 (fceil FR32:$src)),
            (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xA))>;
  def : Pat<(f64 (fceil FR64:$src)),
            (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xA))>;
  def : Pat<(f32 (frint FR32:$src)),
            (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
  def : Pat<(f64 (frint FR64:$src)),
            (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
  def : Pat<(f32 (ftrunc FR32:$src)),
            (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xB))>;
  def : Pat<(f64 (ftrunc FR64:$src)),
            (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xB))>;

  def : Pat<(v4f32 (ffloor VR128:$src)),
            (ROUNDPSr VR128:$src, (i32 0x9))>;
  def : Pat<(v4f32 (fnearbyint VR128:$src)),
            (ROUNDPSr VR128:$src, (i32 0xC))>;
  def : Pat<(v4f32 (fceil VR128:$src)),
            (ROUNDPSr VR128:$src, (i32 0xA))>;
  def : Pat<(v4f32 (frint VR128:$src)),
            (ROUNDPSr VR128:$src, (i32 0x4))>;
  def : Pat<(v4f32 (ftrunc VR128:$src)),
            (ROUNDPSr VR128:$src, (i32 0xB))>;

  def : Pat<(v2f64 (ffloor VR128:$src)),
            (ROUNDPDr VR128:$src, (i32 0x9))>;
  def : Pat<(v2f64 (fnearbyint VR128:$src)),
            (ROUNDPDr VR128:$src, (i32 0xC))>;
  def : Pat<(v2f64 (fceil VR128:$src)),
            (ROUNDPDr VR128:$src, (i32 0xA))>;
  def : Pat<(v2f64 (frint VR128:$src)),
            (ROUNDPDr VR128:$src, (i32 0x4))>;
  def : Pat<(v2f64 (ftrunc VR128:$src)),
            (ROUNDPDr VR128:$src, (i32 0xB))>;
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Bit Test
//===----------------------------------------------------------------------===//

// ptest instruction: X86ISelLowering lowers the corresponding Intel
// intrinsic to the X86ptest node matched here.
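// PTEST sets ZF when (src1 & src2) is all zeroes and CF when (~src1 & src2)
// is all zeroes; VTESTPS/VTESTPD further down apply the same test to just
// the sign bits of each packed element.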
let Defs = [EFLAGS], Predicates = [HasAVX] in {
def VPTESTrr  : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
                Sched<[WriteVecLogic]>, VEX;
def VPTESTrm  : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS,(X86ptest VR128:$src1, (loadv2i64 addr:$src2)))]>,
                Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX;

def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
                Sched<[WriteVecLogic]>, VEX, VEX_L;
def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS,(X86ptest VR256:$src1, (loadv4i64 addr:$src2)))]>,
                Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX, VEX_L;
}

let Defs = [EFLAGS] in {
def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
              "ptest\t{$src2, $src1|$src1, $src2}",
              [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
              Sched<[WriteVecLogic]>;
def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
              "ptest\t{$src2, $src1|$src1, $src2}",
              [(set EFLAGS, (X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
              Sched<[WriteVecLogicLd, ReadAfterLd]>;
}

// The bit test instructions below are AVX only
multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
  def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
            [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>,
            Sched<[WriteVecLogic]>, VEX;
  def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
            [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
            Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX;
}

let Defs = [EFLAGS], Predicates = [HasAVX] in {
let ExeDomain = SSEPackedSingle in {
defm VTESTPS  : avx_bittest<0x0E, "vtestps", VR128, f128mem, loadv4f32, v4f32>;
defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, loadv8f32, v8f32>,
                VEX_L;
}
let ExeDomain = SSEPackedDouble in {
defm VTESTPD  : avx_bittest<0x0F, "vtestpd", VR128, f128mem, loadv2f64, v2f64>;
defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, loadv4f64, v4f64>,
                VEX_L;
}
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Misc Instructions
//===----------------------------------------------------------------------===//

let Defs = [EFLAGS], Predicates = [HasPOPCNT] in {
  def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
                     "popcnt{w}\t{$src, $dst|$dst, $src}",
                     [(set GR16:$dst, (ctpop GR16:$src)), (implicit EFLAGS)],
                     IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>,
                     OpSize16, XS;
  def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
                     "popcnt{w}\t{$src, $dst|$dst, $src}",
                     [(set GR16:$dst, (ctpop (loadi16 addr:$src))),
                      (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
                      Sched<[WriteFAddLd]>, OpSize16, XS;

  def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
                     "popcnt{l}\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (ctpop GR32:$src)), (implicit EFLAGS)],
                     IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>,
                     OpSize32, XS;

  def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
                     "popcnt{l}\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (ctpop (loadi32 addr:$src))),
                      (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
                      Sched<[WriteFAddLd]>, OpSize32, XS;

  def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                      "popcnt{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, (ctpop GR64:$src)), (implicit EFLAGS)],
                      IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>, XS;
  def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                      "popcnt{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, (ctpop (loadi64 addr:$src))),
                       (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
                       Sched<[WriteFAddLd]>, XS;
}
// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId128, PatFrag ld_frag,
                                 X86FoldableSchedWrite Sched> {
  def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>,
                    Sched<[Sched]>;
  def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128 (bitconvert (ld_frag addr:$src))))]>,
                    Sched<[Sched.Folded]>;
}

// PHMIN has the same profile as PSAD, thus we use the same scheduling
// model, although the naming is misleading.
let Predicates = [HasAVX] in
defm VPHMINPOSUW : SS41I_unop_rm_int_v16<0x41, "vphminposuw",
                                         int_x86_sse41_phminposuw, loadv2i64,
                                         WriteVecIMul>, VEX;
defm PHMINPOSUW : SS41I_unop_rm_int_v16<0x41, "phminposuw",
                                        int_x86_sse41_phminposuw, memopv2i64,
                                        WriteVecIMul>;
/// SS48I_binop_rm - Simple SSE41 binary operator.
multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                          X86MemOperand x86memop, bit Is2Addr = 1,
                          OpndItins itins = SSE_INTALU_ITINS_P> {
  let isCommutable = 1 in
  def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
       Sched<[itins.Sched]>;
  def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst,
         (OpVT (OpNode RC:$src1, (bitconvert (memop_frag addr:$src2)))))]>,
       Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

/// SS48I_binop_rm2 - Simple SSE41 binary operator with different src and dst
/// types (e.g. PMULDQ, which multiplies the even v4i32 lanes into a v2i64
/// result).
multiclass SS48I_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           ValueType DstVT, ValueType SrcVT, RegisterClass RC,
                           PatFrag memop_frag, X86MemOperand x86memop,
                           OpndItins itins,
                           bit IsCommutable = 0, bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>,
       Sched<[itins.Sched]>;
  def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
                                     (bitconvert (memop_frag addr:$src2)))))]>,
       Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
let Predicates = [HasAVX, NoVLX] in {
  defm VPMINSB : SS48I_binop_rm<0x38, "vpminsb", smin, v16i8, VR128,
                                loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                VEX_4V;
  defm VPMINSD : SS48I_binop_rm<0x39, "vpminsd", smin, v4i32, VR128,
                                loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                VEX_4V;
  defm VPMINUD : SS48I_binop_rm<0x3B, "vpminud", umin, v4i32, VR128,
                                loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                VEX_4V;
  defm VPMINUW : SS48I_binop_rm<0x3A, "vpminuw", umin, v8i16, VR128,
                                loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                VEX_4V;
  defm VPMAXSB : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v16i8, VR128,
                                loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                VEX_4V;
  defm VPMAXSD : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v4i32, VR128,
                                loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                VEX_4V;
  defm VPMAXUD : SS48I_binop_rm<0x3F, "vpmaxud", umax, v4i32, VR128,
                                loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                VEX_4V;
  defm VPMAXUW : SS48I_binop_rm<0x3E, "vpmaxuw", umax, v8i16, VR128,
                                loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                VEX_4V;
  defm VPMULDQ : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v2i64, v4i32,
                                 VR128, loadv2i64, i128mem,
                                 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
}

let Predicates = [HasAVX2, NoVLX] in {
  defm VPMINSBY : SS48I_binop_rm<0x38, "vpminsb", smin, v32i8, VR256,
                                 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V, VEX_L;
  defm VPMINSDY : SS48I_binop_rm<0x39, "vpminsd", smin, v8i32, VR256,
                                 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V, VEX_L;
  defm VPMINUDY : SS48I_binop_rm<0x3B, "vpminud", umin, v8i32, VR256,
                                 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V, VEX_L;
  defm VPMINUWY : SS48I_binop_rm<0x3A, "vpminuw", umin, v16i16, VR256,
                                 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V, VEX_L;
  defm VPMAXSBY : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v32i8, VR256,
                                 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V, VEX_L;
  defm VPMAXSDY : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v8i32, VR256,
                                 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V, VEX_L;
  defm VPMAXUDY : SS48I_binop_rm<0x3F, "vpmaxud", umax, v8i32, VR256,
                                 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V, VEX_L;
  defm VPMAXUWY : SS48I_binop_rm<0x3E, "vpmaxuw", umax, v16i16, VR256,
                                 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V, VEX_L;
  defm VPMULDQY : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v4i64, v8i32,
                                  VR256, loadv4i64, i256mem,
                                  SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
}

let Constraints = "$src1 = $dst" in {
  defm PMINSB : SS48I_binop_rm<0x38, "pminsb", smin, v16i8, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMINSD : SS48I_binop_rm<0x39, "pminsd", smin, v4i32, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMINUD : SS48I_binop_rm<0x3B, "pminud", umin, v4i32, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMINUW : SS48I_binop_rm<0x3A, "pminuw", umin, v8i16, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMAXSB : SS48I_binop_rm<0x3C, "pmaxsb", smax, v16i8, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMAXSD : SS48I_binop_rm<0x3D, "pmaxsd", smax, v4i32, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMAXUD : SS48I_binop_rm<0x3F, "pmaxud", umax, v4i32, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMAXUW : SS48I_binop_rm<0x3E, "pmaxuw", umax, v8i16, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMULDQ : SS48I_binop_rm2<0x28, "pmuldq", X86pmuldq, v2i64, v4i32,
                                VR128, memopv2i64, i128mem,
                                SSE_INTMUL_ITINS_P, 1>;
}
let Predicates = [HasAVX, NoVLX] in {
  defm VPMULLD  : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
                                 memopv2i64, i128mem, 0, SSE_PMULLD_ITINS>,
                                 VEX_4V;
  defm VPCMPEQQ : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v2i64, VR128,
                                 memopv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V;
}
let Predicates = [HasAVX2] in {
  defm VPMULLDY  : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
                                  loadv4i64, i256mem, 0, SSE_PMULLD_ITINS>,
                                  VEX_4V, VEX_L;
  defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
                                  loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                  VEX_4V, VEX_L;
}

let Constraints = "$src1 = $dst" in {
  defm PMULLD  : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, VR128,
                                memopv2i64, i128mem, 1, SSE_PMULLD_ITINS>;
  defm PCMPEQQ : SS48I_binop_rm<0x29, "pcmpeqq", X86pcmpeq, v2i64, VR128,
                                memopv2i64, i128mem, 1, SSE_INTALUQ_ITINS_P>;
}
/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
                 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
                 X86MemOperand x86memop, bit Is2Addr = 1,
                 OpndItins itins = DEFAULT_ITINS> {
  let isCommutable = 1 in
  def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))], itins.rr>,
        Sched<[itins.Sched]>;
  def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst,
          (IntId RC:$src1,
           (bitconvert (memop_frag addr:$src2)), imm:$src3))], itins.rm>,
        Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
/// SS41I_binop_rmi - SSE 4.1 binary operator with 8-bit immediate
multiclass SS41I_binop_rmi<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                           X86MemOperand x86memop, bit Is2Addr = 1,
                           OpndItins itins = DEFAULT_ITINS> {
  let isCommutable = 1 in
  def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2, imm:$src3)))],
        itins.rr>, Sched<[itins.Sched]>;
  def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst,
          (OpVT (OpNode RC:$src1,
                 (bitconvert (memop_frag addr:$src2)), imm:$src3)))], itins.rm>,
        Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
let Predicates = [HasAVX] in {
  let isCommutable = 0 in {
    defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
                                        VR128, loadv2i64, i128mem, 0,
                                        DEFAULT_ITINS_MPSADSCHED>, VEX_4V;
  }

  let ExeDomain = SSEPackedSingle in {
    defm VBLENDPS : SS41I_binop_rmi<0x0C, "vblendps", X86Blendi, v4f32,
                                    VR128, loadv4f32, f128mem, 0,
                                    DEFAULT_ITINS_FBLENDSCHED>, VEX_4V;
    defm VBLENDPSY : SS41I_binop_rmi<0x0C, "vblendps", X86Blendi, v8f32,
                                     VR256, loadv8f32, f256mem, 0,
                                     DEFAULT_ITINS_FBLENDSCHED>, VEX_4V, VEX_L;
  }
  let ExeDomain = SSEPackedDouble in {
    defm VBLENDPD : SS41I_binop_rmi<0x0D, "vblendpd", X86Blendi, v2f64,
                                    VR128, loadv2f64, f128mem, 0,
                                    DEFAULT_ITINS_FBLENDSCHED>, VEX_4V;
    defm VBLENDPDY : SS41I_binop_rmi<0x0D, "vblendpd", X86Blendi, v4f64,
                                     VR256, loadv4f64, f256mem, 0,
                                     DEFAULT_ITINS_FBLENDSCHED>, VEX_4V, VEX_L;
  }
  defm VPBLENDW : SS41I_binop_rmi<0x0E, "vpblendw", X86Blendi, v8i16,
                                  VR128, loadv2i64, i128mem, 0,
                                  DEFAULT_ITINS_BLENDSCHED>, VEX_4V;

  let ExeDomain = SSEPackedSingle in
  defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
                                   VR128, loadv4f32, f128mem, 0,
                                   SSE_DPPS_ITINS>, VEX_4V;
  let ExeDomain = SSEPackedDouble in
  defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
                                   VR128, loadv2f64, f128mem, 0,
                                   SSE_DPPS_ITINS>, VEX_4V;
  let ExeDomain = SSEPackedSingle in
  defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
                                    VR256, loadv8f32, i256mem, 0,
                                    SSE_DPPS_ITINS>, VEX_4V, VEX_L;
}
let Predicates = [HasAVX2] in {
  let isCommutable = 0 in {
    defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw,
                                         VR256, loadv4i64, i256mem, 0,
                                         DEFAULT_ITINS_MPSADSCHED>, VEX_4V, VEX_L;
  }
  defm VPBLENDWY : SS41I_binop_rmi<0x0E, "vpblendw", X86Blendi, v16i16,
                                   VR256, loadv4i64, i256mem, 0,
                                   DEFAULT_ITINS_BLENDSCHED>, VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
  let isCommutable = 0 in {
    defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
                                       VR128, memopv2i64, i128mem,
                                       1, SSE_MPSADBW_ITINS>;
  }
  let ExeDomain = SSEPackedSingle in
  defm BLENDPS : SS41I_binop_rmi<0x0C, "blendps", X86Blendi, v4f32,
                                 VR128, memopv4f32, f128mem,
                                 1, SSE_INTALU_ITINS_FBLEND_P>;
  let ExeDomain = SSEPackedDouble in
  defm BLENDPD : SS41I_binop_rmi<0x0D, "blendpd", X86Blendi, v2f64,
                                 VR128, memopv2f64, f128mem,
                                 1, SSE_INTALU_ITINS_FBLEND_P>;
  defm PBLENDW : SS41I_binop_rmi<0x0E, "pblendw", X86Blendi, v8i16,
                                 VR128, memopv2i64, i128mem,
                                 1, SSE_INTALU_ITINS_BLEND_P>;
  let ExeDomain = SSEPackedSingle in
  defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
                                  VR128, memopv4f32, f128mem, 1,
                                  SSE_DPPS_ITINS>;
  let ExeDomain = SSEPackedDouble in
  defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
                                  VR128, memopv2f64, f128mem, 1,
                                  SSE_DPPD_ITINS>;
}
/// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
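// The fourth (register) operand is encoded in bits 7:4 of the immediate
// byte; that is what the VEX_I8IMM annotation on the defs below requests.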
multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
                                    RegisterClass RC, X86MemOperand x86memop,
                                    PatFrag mem_frag, Intrinsic IntId,
                                    X86FoldableSchedWrite Sched> {
  def rr : Ii8<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
               NoItinerary, SSEPackedInt>, TAPD, VEX_4V, VEX_I8IMM,
               Sched<[Sched]>;

  def rm : Ii8<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, x86memop:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               [(set RC:$dst,
                 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
                  RC:$src3))],
               NoItinerary, SSEPackedInt>, TAPD, VEX_4V, VEX_I8IMM,
               Sched<[Sched.Folded, ReadAfterLd]>;
}
let Predicates = [HasAVX] in {
let ExeDomain = SSEPackedDouble in {
defm VBLENDVPD  : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, f128mem,
                                           loadv2f64, int_x86_sse41_blendvpd,
                                           WriteFVarBlend>;
defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, f256mem,
                                           loadv4f64, int_x86_avx_blendv_pd_256,
                                           WriteFVarBlend>, VEX_L;
} // ExeDomain = SSEPackedDouble
let ExeDomain = SSEPackedSingle in {
defm VBLENDVPS  : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, f128mem,
                                           loadv4f32, int_x86_sse41_blendvps,
                                           WriteFVarBlend>;
defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, f256mem,
                                           loadv8f32, int_x86_avx_blendv_ps_256,
                                           WriteFVarBlend>, VEX_L;
} // ExeDomain = SSEPackedSingle
defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
                                          loadv2i64, int_x86_sse41_pblendvb,
                                          WriteVarBlend>;
}

let Predicates = [HasAVX2] in {
defm VPBLENDVBY : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR256, i256mem,
                                           loadv4i64, int_x86_avx2_pblendvb,
                                           WriteVarBlend>, VEX_L;
}
let Predicates = [HasAVX] in {
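  // vselect yields $src1 where the mask is true, but BLENDV copies from its
  // second register source where a mask bit is set, so $src1 and $src2 are
  // passed to the instruction in swapped order.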
  def : Pat<(v16i8 (vselect (v16i8 VR128:$mask), (v16i8 VR128:$src1),
                            (v16i8 VR128:$src2))),
            (VPBLENDVBrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v4i32 (vselect (v4i32 VR128:$mask), (v4i32 VR128:$src1),
                            (v4i32 VR128:$src2))),
            (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v4f32 (vselect (v4i32 VR128:$mask), (v4f32 VR128:$src1),
                            (v4f32 VR128:$src2))),
            (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v2i64 (vselect (v2i64 VR128:$mask), (v2i64 VR128:$src1),
                            (v2i64 VR128:$src2))),
            (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v2f64 (vselect (v2i64 VR128:$mask), (v2f64 VR128:$src1),
                            (v2f64 VR128:$src2))),
            (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v8i32 (vselect (v8i32 VR256:$mask), (v8i32 VR256:$src1),
                            (v8i32 VR256:$src2))),
            (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
  def : Pat<(v8f32 (vselect (v8i32 VR256:$mask), (v8f32 VR256:$src1),
                            (v8f32 VR256:$src2))),
            (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
  def : Pat<(v4i64 (vselect (v4i64 VR256:$mask), (v4i64 VR256:$src1),
                            (v4i64 VR256:$src2))),
            (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
  def : Pat<(v4f64 (vselect (v4i64 VR256:$mask), (v4f64 VR256:$src1),
                            (v4f64 VR256:$src2))),
            (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
}
let Predicates = [HasAVX2] in {
  def : Pat<(v32i8 (vselect (v32i8 VR256:$mask), (v32i8 VR256:$src1),
                            (v32i8 VR256:$src2))),
            (VPBLENDVBYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
}
// FIXME: Prefer a movss or movsd over a blendps when optimizing for size or
// on targets where they have equal performance. These were changed to use
// blends because blends have better throughput on SandyBridge and Haswell, but
// movs[s/d] are 1-2 byte shorter instructions.
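// For example, "movss %xmm1, %xmm0" encodes in 4 bytes (F3 0F 10 C1) where
// the equivalent "blendps $1, %xmm1, %xmm0" takes 6 (66 0F 3A 0C C1 01).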
let Predicates = [UseAVX] in {
  let AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVS{S,D} to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
            (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (VBLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (VPBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)>;

  // Move low f32 and clear high bits.
  def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
            (VBLENDPSYrri (v8f32 (AVX_SET0)), VR256:$src, (i8 1))>;

  // Move low f64 and clear high bits.
  def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
            (VBLENDPDYrri (v4f64 (AVX_SET0)), VR256:$src, (i8 1))>;
  }

  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector FR32:$src)), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0),
                           (v4f32 (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)),
                           sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector FR64:$src)), (iPTR 0)))),
            (SUBREG_TO_REG (i64 0),
                           (v2f64 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)),
                           sub_xmm)>;

  // These will incur an FP/int domain crossing penalty, but it may be the only
  // way without AVX2. Do not add any complexity because we may be able to match
  // more optimal patterns defined earlier in this file.
  def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
            (VBLENDPSYrri (v8i32 (AVX_SET0)), VR256:$src, (i8 1))>;
  def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
            (VBLENDPDYrri (v4i64 (AVX_SET0)), VR256:$src, (i8 1))>;
}
// FIXME: Prefer a movss or movsd over a blendps when optimizing for size or
// on targets where they have equal performance. These were changed to use
// blends because blends have better throughput on SandyBridge and Haswell, but
// movs[s/d] are 1-2 byte shorter instructions.
let Predicates = [UseSSE41] in {
  // With SSE41 we can use blends for these patterns.
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (BLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (PBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
  def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
            (BLENDPDrri (v2f64 (V_SET0)), VR128:$src, (i8 1))>;
}
/// SS41I_ternary_int - SSE 4.1 ternary operator
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
  multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                               X86MemOperand x86memop, Intrinsic IntId,
                               OpndItins itins = DEFAULT_ITINS> {
    def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr,
                     "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))],
                    itins.rr>, Sched<[itins.Sched]>;

    def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, x86memop:$src2),
                    !strconcat(OpcodeStr,
                     "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst,
                      (IntId VR128:$src1,
                       (bitconvert (mem_frag addr:$src2)), XMM0))],
                    itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
  }
}

let ExeDomain = SSEPackedDouble in
defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", memopv2f64, f128mem,
                                  int_x86_sse41_blendvpd,
                                  DEFAULT_ITINS_FBLENDSCHED>;
let ExeDomain = SSEPackedSingle in
defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", memopv4f32, f128mem,
                                  int_x86_sse41_blendvps,
                                  DEFAULT_ITINS_FBLENDSCHED>;
defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", memopv2i64, i128mem,
                                  int_x86_sse41_pblendvb,
                                  DEFAULT_ITINS_VARBLENDSCHED>;

// Aliases with the implicit xmm0 argument
def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (BLENDVPDrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (BLENDVPDrm0 VR128:$dst, f128mem:$src2)>;
def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (BLENDVPSrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (BLENDVPSrm0 VR128:$dst, f128mem:$src2)>;
def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (PBLENDVBrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (PBLENDVBrm0 VR128:$dst, i128mem:$src2)>;
let Predicates = [UseSSE41] in {
  def : Pat<(v16i8 (vselect (v16i8 XMM0), (v16i8 VR128:$src1),
                            (v16i8 VR128:$src2))),
            (PBLENDVBrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v4i32 (vselect (v4i32 XMM0), (v4i32 VR128:$src1),
                            (v4i32 VR128:$src2))),
            (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v4f32 (vselect (v4i32 XMM0), (v4f32 VR128:$src1),
                            (v4f32 VR128:$src2))),
            (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v2i64 (vselect (v2i64 XMM0), (v2i64 VR128:$src1),
                            (v2i64 VR128:$src2))),
            (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v2f64 (vselect (v2i64 XMM0), (v2f64 VR128:$src1),
                            (v2f64 VR128:$src2))),
            (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
}
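// MOVNTDQA is a streaming (non-temporal hint) load; the hint only has an
// effect when reading from write-combining (WC) memory.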
let SchedRW = [WriteLoad] in {
let Predicates = [HasAVX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vmovntdqa\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                        VEX;
let Predicates = [HasAVX2] in
def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                         "vmovntdqa\t{$src, $dst|$dst, $src}",
                         [(set VR256:$dst, (int_x86_avx2_movntdqa addr:$src))]>,
                         VEX, VEX_L;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "movntdqa\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>;
} // SchedRW
//===----------------------------------------------------------------------===//
// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//

/// SS42I_binop_rm - Simple SSE 4.2 binary operator
multiclass SS42I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                          X86MemOperand x86memop, bit Is2Addr = 1> {
  def rr : SS428I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>;
  def rm : SS428I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst,
         (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>;
}

let Predicates = [HasAVX] in
  defm VPCMPGTQ : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v2i64, VR128,
                                 loadv2i64, i128mem, 0>, VEX_4V;

let Predicates = [HasAVX2] in
  defm VPCMPGTQY : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v4i64, VR256,
                                  loadv4i64, i256mem, 0>, VEX_4V, VEX_L;

let Constraints = "$src1 = $dst" in
  defm PCMPGTQ : SS42I_binop_rm<0x37, "pcmpgtq", X86pcmpgt, v2i64, VR128,
                                memopv2i64, i128mem>;
//===----------------------------------------------------------------------===//
// SSE4.2 - String/text Processing Instructions
//===----------------------------------------------------------------------===//

// Packed Compare Implicit Length Strings, Return Mask
multiclass pseudo_pcmpistrm<string asm, PatFrag ld_frag> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2, u8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
                                                  imm:$src3))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1,
       (bc_v16i8 (ld_frag addr:$src2)), imm:$src3))]>;
}

let Defs = [EFLAGS], usesCustomInserter = 1 in {
  defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128", loadv2i64>,
                       Requires<[HasAVX]>;
  defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128", memopv2i64>,
                      Requires<[UseSSE42]>;
}

multiclass pcmpistrm_SS42AI<string asm> {
  def rr : SS42AI<0x62, MRMSrcReg, (outs),
            (ins VR128:$src1, VR128:$src2, u8imm:$src3),
            !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
            []>, Sched<[WritePCmpIStrM]>;
  def rm : SS42AI<0x62, MRMSrcMem, (outs),
            (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
            !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
            []>, Sched<[WritePCmpIStrMLd, ReadAfterLd]>;
}

let Defs = [XMM0, EFLAGS], hasSideEffects = 0 in {
  let Predicates = [HasAVX] in
  defm VPCMPISTRM128 : pcmpistrm_SS42AI<"vpcmpistrm">, VEX;
  defm PCMPISTRM128 : pcmpistrm_SS42AI<"pcmpistrm">;
}
// Packed Compare Explicit Length Strings, Return Mask
multiclass pseudo_pcmpestrm<string asm, PatFrag ld_frag> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src3, u8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                       VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128 VR128:$src1, EAX,
       (bc_v16i8 (ld_frag addr:$src3)), EDX, imm:$src5))]>;
}

let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
  defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128", loadv2i64>,
                       Requires<[HasAVX]>;
  defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128", memopv2i64>,
                      Requires<[UseSSE42]>;
}

multiclass SS42AI_pcmpestrm<string asm> {
  def rr : SS42AI<0x60, MRMSrcReg, (outs),
            (ins VR128:$src1, VR128:$src3, u8imm:$src5),
            !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
            []>, Sched<[WritePCmpEStrM]>;
  def rm : SS42AI<0x60, MRMSrcMem, (outs),
            (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
            !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
            []>, Sched<[WritePCmpEStrMLd, ReadAfterLd]>;
}

let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
  let Predicates = [HasAVX] in
  defm VPCMPESTRM128 : SS42AI_pcmpestrm<"vpcmpestrm">, VEX;
  defm PCMPESTRM128 : SS42AI_pcmpestrm<"pcmpestrm">;
}
// Packed Compare Implicit Length Strings, Return Index
multiclass pseudo_pcmpistri<string asm, PatFrag ld_frag> {
  def REG : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, VR128:$src2, u8imm:$src3),
    [(set GR32:$dst, EFLAGS,
      (X86pcmpistri VR128:$src1, VR128:$src2, imm:$src3))]>;
  def MEM : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
    [(set GR32:$dst, EFLAGS, (X86pcmpistri VR128:$src1,
      (bc_v16i8 (ld_frag addr:$src2)), imm:$src3))]>;
}

let Defs = [EFLAGS], usesCustomInserter = 1 in {
  defm VPCMPISTRI : pseudo_pcmpistri<"#VPCMPISTRI", loadv2i64>,
                    Requires<[HasAVX]>;
  defm PCMPISTRI : pseudo_pcmpistri<"#PCMPISTRI", memopv2i64>,
                   Requires<[UseSSE42]>;
}

multiclass SS42AI_pcmpistri<string asm> {
  def rr : SS42AI<0x63, MRMSrcReg, (outs),
            (ins VR128:$src1, VR128:$src2, u8imm:$src3),
            !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
            []>, Sched<[WritePCmpIStrI]>;
  def rm : SS42AI<0x63, MRMSrcMem, (outs),
            (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
            !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
            []>, Sched<[WritePCmpIStrILd, ReadAfterLd]>;
}

let Defs = [ECX, EFLAGS], hasSideEffects = 0 in {
  let Predicates = [HasAVX] in
  defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX;
  defm PCMPISTRI : SS42AI_pcmpistri<"pcmpistri">;
}
// Packed Compare Explicit Length Strings, Return Index
multiclass pseudo_pcmpestri<string asm, PatFrag ld_frag> {
  def REG : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, VR128:$src3, u8imm:$src5),
    [(set GR32:$dst, EFLAGS,
      (X86pcmpestri VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
  def MEM : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
    [(set GR32:$dst, EFLAGS,
      (X86pcmpestri VR128:$src1, EAX, (bc_v16i8 (ld_frag addr:$src3)), EDX,
       imm:$src5))]>;
}

let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
  defm VPCMPESTRI : pseudo_pcmpestri<"#VPCMPESTRI", loadv2i64>,
                    Requires<[HasAVX]>;
  defm PCMPESTRI : pseudo_pcmpestri<"#PCMPESTRI", memopv2i64>,
                   Requires<[UseSSE42]>;
}

multiclass SS42AI_pcmpestri<string asm> {
  def rr : SS42AI<0x61, MRMSrcReg, (outs),
            (ins VR128:$src1, VR128:$src3, u8imm:$src5),
            !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
            []>, Sched<[WritePCmpEStrI]>;
  def rm : SS42AI<0x61, MRMSrcMem, (outs),
            (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
            !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
            []>, Sched<[WritePCmpEStrILd, ReadAfterLd]>;
}

let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
  let Predicates = [HasAVX] in
  defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX;
  defm PCMPESTRI : SS42AI_pcmpestri<"pcmpestri">;
}
//===----------------------------------------------------------------------===//
// SSE4.2 - CRC Instructions
//===----------------------------------------------------------------------===//

// No CRC instructions have AVX equivalents
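// Note: these implement CRC-32C (the Castagnoli polynomial 0x11EDC6F41),
// not the CRC-32 polynomial used by zip and zlib.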
// crc intrinsic instruction
// These instructions come only in rr and rm forms; the only difference
// between the variants is the size of r and m.
class SS42I_crc32r<bits<8> opc, string asm, RegisterClass RCOut,
                   RegisterClass RCIn, SDPatternOperator Int> :
       SS42FI<opc, MRMSrcReg, (outs RCOut:$dst), (ins RCOut:$src1, RCIn:$src2),
              !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
              [(set RCOut:$dst, (Int RCOut:$src1, RCIn:$src2))], IIC_CRC32_REG>,
       Sched<[WriteFAdd]>;

class SS42I_crc32m<bits<8> opc, string asm, RegisterClass RCOut,
                   X86MemOperand x86memop, SDPatternOperator Int> :
       SS42FI<opc, MRMSrcMem, (outs RCOut:$dst), (ins RCOut:$src1, x86memop:$src2),
              !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
              [(set RCOut:$dst, (Int RCOut:$src1, (load addr:$src2)))],
              IIC_CRC32_MEM>, Sched<[WriteFAddLd, ReadAfterLd]>;
let Constraints = "$src1 = $dst" in {
  def CRC32r32m8  : SS42I_crc32m<0xF0, "crc32{b}", GR32, i8mem,
                                 int_x86_sse42_crc32_32_8>;
  def CRC32r32r8  : SS42I_crc32r<0xF0, "crc32{b}", GR32, GR8,
                                 int_x86_sse42_crc32_32_8>;
  def CRC32r32m16 : SS42I_crc32m<0xF1, "crc32{w}", GR32, i16mem,
                                 int_x86_sse42_crc32_32_16>, OpSize16;
  def CRC32r32r16 : SS42I_crc32r<0xF1, "crc32{w}", GR32, GR16,
                                 int_x86_sse42_crc32_32_16>, OpSize16;
  def CRC32r32m32 : SS42I_crc32m<0xF1, "crc32{l}", GR32, i32mem,
                                 int_x86_sse42_crc32_32_32>, OpSize32;
  def CRC32r32r32 : SS42I_crc32r<0xF1, "crc32{l}", GR32, GR32,
                                 int_x86_sse42_crc32_32_32>, OpSize32;
  def CRC32r64m64 : SS42I_crc32m<0xF1, "crc32{q}", GR64, i64mem,
                                 int_x86_sse42_crc32_64_64>, REX_W;
  def CRC32r64r64 : SS42I_crc32r<0xF1, "crc32{q}", GR64, GR64,
                                 int_x86_sse42_crc32_64_64>, REX_W;
  let hasSideEffects = 0 in {
    let mayLoad = 1 in
    def CRC32r64m8 : SS42I_crc32m<0xF0, "crc32{b}", GR64, i8mem,
                                  null_frag>, REX_W;
    def CRC32r64r8 : SS42I_crc32r<0xF0, "crc32{b}", GR64, GR8,
                                  null_frag>, REX_W;
  }
}
//===----------------------------------------------------------------------===//
// SHA-NI Instructions
//===----------------------------------------------------------------------===//
multiclass SHAI_binop<bits<8> Opc, string OpcodeStr, Intrinsic IntId,
                      bit UsesXMM0 = 0> {
  def rr : I<Opc, MRMSrcReg, (outs VR128:$dst),
             (ins VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
             [!if(UsesXMM0,
                  (set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0)),
                  (set VR128:$dst, (IntId VR128:$src1, VR128:$src2)))]>, T8;

  def rm : I<Opc, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
             [!if(UsesXMM0,
                  (set VR128:$dst, (IntId VR128:$src1,
                    (bc_v4i32 (memopv2i64 addr:$src2)), XMM0)),
                  (set VR128:$dst, (IntId VR128:$src1,
                    (bc_v4i32 (memopv2i64 addr:$src2)))))]>, T8;
}

let Constraints = "$src1 = $dst", Predicates = [HasSHA] in {
  def SHA1RNDS4rri : Ii8<0xCC, MRMSrcReg, (outs VR128:$dst),
                         (ins VR128:$src1, VR128:$src2, u8imm:$src3),
                         "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                         [(set VR128:$dst,
                           (int_x86_sha1rnds4 VR128:$src1, VR128:$src2,
                            (i8 imm:$src3)))]>, TA;
  def SHA1RNDS4rmi : Ii8<0xCC, MRMSrcMem, (outs VR128:$dst),
                         (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
                         "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                         [(set VR128:$dst,
                           (int_x86_sha1rnds4 VR128:$src1,
                            (bc_v4i32 (memopv2i64 addr:$src2)),
                            (i8 imm:$src3)))]>, TA;

  defm SHA1NEXTE : SHAI_binop<0xC8, "sha1nexte", int_x86_sha1nexte>;
  defm SHA1MSG1  : SHAI_binop<0xC9, "sha1msg1", int_x86_sha1msg1>;
  defm SHA1MSG2  : SHAI_binop<0xCA, "sha1msg2", int_x86_sha1msg2>;

  let Uses=[XMM0] in
  defm SHA256RNDS2 : SHAI_binop<0xCB, "sha256rnds2", int_x86_sha256rnds2, 1>;

  defm SHA256MSG1 : SHAI_binop<0xCC, "sha256msg1", int_x86_sha256msg1>;
  defm SHA256MSG2 : SHAI_binop<0xCD, "sha256msg2", int_x86_sha256msg2>;
}
// Aliases with explicit %xmm0
def : InstAlias<"sha256rnds2\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (SHA256RNDS2rr VR128:$dst, VR128:$src2)>;
def : InstAlias<"sha256rnds2\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (SHA256RNDS2rm VR128:$dst, i128mem:$src2)>;
//===----------------------------------------------------------------------===//
// AES-NI Instructions
//===----------------------------------------------------------------------===//

multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
                             PatFrag ld_frag, bit Is2Addr = 1> {
  def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       Sched<[WriteAESDecEnc]>;
  def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1, (ld_frag addr:$src2)))]>,
       Sched<[WriteAESDecEncLd, ReadAfterLd]>;
}

// Perform One Round of an AES Encryption/Decryption Flow
let Predicates = [HasAVX, HasAES] in {
  defm VAESENC     : AESI_binop_rm_int<0xDC, "vaesenc",
                       int_x86_aesni_aesenc, loadv2i64, 0>, VEX_4V;
  defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
                       int_x86_aesni_aesenclast, loadv2i64, 0>, VEX_4V;
  defm VAESDEC     : AESI_binop_rm_int<0xDE, "vaesdec",
                       int_x86_aesni_aesdec, loadv2i64, 0>, VEX_4V;
  defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
                       int_x86_aesni_aesdeclast, loadv2i64, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  defm AESENC     : AESI_binop_rm_int<0xDC, "aesenc",
                      int_x86_aesni_aesenc, memopv2i64>;
  defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
                      int_x86_aesni_aesenclast, memopv2i64>;
  defm AESDEC     : AESI_binop_rm_int<0xDE, "aesdec",
                      int_x86_aesni_aesdec, memopv2i64>;
  defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
                      int_x86_aesni_aesdeclast, memopv2i64>;
}
// Perform the AES InvMixColumn Transformation
let Predicates = [HasAVX, HasAES] in {
  def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst,
        (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>,
      VEX;
  def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst, (int_x86_aesni_aesimc (loadv2i64 addr:$src1)))]>,
      Sched<[WriteAESIMCLd]>, VEX;
}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
    (ins VR128:$src1),
    "aesimc\t{$src1, $dst|$dst, $src1}",
    [(set VR128:$dst,
      (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>;
def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
    (ins i128mem:$src1),
    "aesimc\t{$src1, $dst|$dst, $src1}",
    [(set VR128:$dst, (int_x86_aesni_aesimc (memopv2i64 addr:$src1)))]>,
    Sched<[WriteAESIMCLd]>;
// AES Round Key Generation Assist
let Predicates = [HasAVX, HasAES] in {
  def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, u8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
      Sched<[WriteAESKeyGen]>, VEX;
  def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1, u8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist (loadv2i64 addr:$src1), imm:$src2))]>,
      Sched<[WriteAESKeyGenLd]>, VEX;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
    (ins VR128:$src1, u8imm:$src2),
    "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
    [(set VR128:$dst,
      (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
    Sched<[WriteAESKeyGen]>;
def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
    (ins i128mem:$src1, u8imm:$src2),
    "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
    [(set VR128:$dst,
      (int_x86_aesni_aeskeygenassist (memopv2i64 addr:$src1), imm:$src2))]>,
    Sched<[WriteAESKeyGenLd]>;
//===----------------------------------------------------------------------===//
// PCLMUL Instructions
//===----------------------------------------------------------------------===//

// AVX carry-less Multiplication instructions
let isCommutable = 1 in
def VPCLMULQDQrr : AVXPCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, u8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           [(set VR128:$dst,
             (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>,
           Sched<[WriteCLMul]>;

def VPCLMULQDQrm : AVXPCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
                              (loadv2i64 addr:$src2), imm:$src3))]>,
           Sched<[WriteCLMulLd, ReadAfterLd]>;

// Carry-less Multiplication instructions
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def PCLMULQDQrr : PCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, u8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           [(set VR128:$dst,
             (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))],
           IIC_SSE_PCLMULQDQ_RR>, Sched<[WriteCLMul]>;

def PCLMULQDQrm : PCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
                              (memopv2i64 addr:$src2), imm:$src3))],
           IIC_SSE_PCLMULQDQ_RM>,
           Sched<[WriteCLMulLd, ReadAfterLd]>;
} // Constraints = "$src1 = $dst"
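// In the aliases below, imm8 bit 0 selects the high (1) or low (0) quadword
// of the first source and bit 4 selects the quadword of the second source,
// giving the four pclmul[lh]q[lh]qdq spellings.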
multiclass pclmul_alias<string asm, int immop> {
  def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrr VR128:$dst, VR128:$src, immop), 0>;

  def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrm VR128:$dst, i128mem:$src, immop), 0>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop),
                  0>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop),
                  0>;
}
defm : pclmul_alias<"hqhq", 0x11>;
defm : pclmul_alias<"hqlq", 0x01>;
defm : pclmul_alias<"lqhq", 0x10>;
defm : pclmul_alias<"lqlq", 0x00>;
//===----------------------------------------------------------------------===//
// SSE4A Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasSSE4A] in {

let Constraints = "$src = $dst" in {
def EXTRQI : Ii8<0x78, MRMXr, (outs VR128:$dst),
                 (ins VR128:$src, u8imm:$len, u8imm:$idx),
                 "extrq\t{$idx, $len, $src|$src, $len, $idx}",
                 [(set VR128:$dst, (X86extrqi VR128:$src, imm:$len,
                                    imm:$idx))]>, PD;
def EXTRQ  : I<0x79, MRMSrcReg, (outs VR128:$dst),
               (ins VR128:$src, VR128:$mask),
               "extrq\t{$mask, $src|$src, $mask}",
               [(set VR128:$dst, (int_x86_sse4a_extrq VR128:$src,
                                  VR128:$mask))]>, PD;

def INSERTQI : Ii8<0x78, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src, VR128:$src2, u8imm:$len, u8imm:$idx),
                   "insertq\t{$idx, $len, $src2, $src|$src, $src2, $len, $idx}",
                   [(set VR128:$dst, (X86insertqi VR128:$src, VR128:$src2,
                                      imm:$len, imm:$idx))]>, XD;
def INSERTQ  : I<0x79, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src, VR128:$mask),
                 "insertq\t{$mask, $src|$src, $mask}",
                 [(set VR128:$dst, (int_x86_sse4a_insertq VR128:$src,
                                    VR128:$mask))]>, XD;
} // Constraints = "$src = $dst"

def MOVNTSS : I<0x2B, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
                "movntss\t{$src, $dst|$dst, $src}",
                [(int_x86_sse4a_movnt_ss addr:$dst, VR128:$src)]>, XS;

def MOVNTSD : I<0x2B, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                "movntsd\t{$src, $dst|$dst, $src}",
                [(int_x86_sse4a_movnt_sd addr:$dst, VR128:$src)]>, XD;
}
//===----------------------------------------------------------------------===//
// AVX Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VBROADCAST - Load from memory and broadcast to all elements of the
// destination operand
//
class avx_broadcast_rm<bits<8> opc, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, ValueType VT,
                       PatFrag ld_frag, SchedWrite Sched> :
  AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
        !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
        [(set RC:$dst, (VT (X86VBroadcast (ld_frag addr:$src))))]>,
        Sched<[Sched]>, VEX {
  let mayLoad = 1;
}

// AVX2 adds register forms
class avx2_broadcast_rr<bits<8> opc, string OpcodeStr, RegisterClass RC,
                        ValueType ResVT, ValueType OpVT, SchedWrite Sched> :
  AVX28I<opc, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (ResVT (X86VBroadcast (OpVT VR128:$src))))]>,
         Sched<[Sched]>, VEX;

let ExeDomain = SSEPackedSingle in {
  def VBROADCASTSSrm  : avx_broadcast_rm<0x18, "vbroadcastss", VR128,
                                         f32mem, v4f32, loadf32, WriteLoad>;
  def VBROADCASTSSYrm : avx_broadcast_rm<0x18, "vbroadcastss", VR256,
                                         f32mem, v8f32, loadf32,
                                         WriteFShuffleLd>, VEX_L;
}
let ExeDomain = SSEPackedDouble in
def VBROADCASTSDYrm : avx_broadcast_rm<0x19, "vbroadcastsd", VR256, f64mem,
                                       v4f64, loadf64, WriteFShuffleLd>, VEX_L;

let ExeDomain = SSEPackedSingle in {
  def VBROADCASTSSrr  : avx2_broadcast_rr<0x18, "vbroadcastss", VR128,
                                          v4f32, v4f32, WriteFShuffle>;
  def VBROADCASTSSYrr : avx2_broadcast_rr<0x18, "vbroadcastss", VR256,
                                          v8f32, v4f32, WriteFShuffle256>, VEX_L;
}
let ExeDomain = SSEPackedDouble in
def VBROADCASTSDYrr : avx2_broadcast_rr<0x19, "vbroadcastsd", VR256,
                                        v4f64, v2f64, WriteFShuffle256>, VEX_L;

let mayLoad = 1, Predicates = [HasAVX2] in
def VBROADCASTI128 : AVX8I<0x5A, MRMSrcMem, (outs VR256:$dst),
                           (ins i128mem:$src),
                           "vbroadcasti128\t{$src, $dst|$dst, $src}", []>,
                           Sched<[WriteLoad]>, VEX, VEX_L;

def VBROADCASTF128 : AVX8I<0x1A, MRMSrcMem, (outs VR256:$dst),
                           (ins f128mem:$src),
                           "vbroadcastf128\t{$src, $dst|$dst, $src}",
                           [(set VR256:$dst,
                              (int_x86_avx_vbroadcastf128_pd_256 addr:$src))]>,
                           Sched<[WriteFShuffleLd]>, VEX, VEX_L;

let Predicates = [HasAVX] in
def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
          (VBROADCASTF128 addr:$src)>;
//===----------------------------------------------------------------------===//
// VINSERTF128 - Insert packed floating-point values
//
let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, u8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteFShuffle]>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f128mem:$src2, u8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteFShuffleLd, ReadAfterLd]>, VEX_4V, VEX_L;
}

let Predicates = [HasAVX, NoVLX] in {
def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;

def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1), (loadv4f32 addr:$src2),
                                  (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v4f64 VR256:$src1), (loadv2f64 addr:$src2),
                                  (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
}

let Predicates = [HasAVX1Only] in {
def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;

def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
                                  (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1),
                                  (bc_v4i32 (loadv2i64 addr:$src2)),
                                  (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1),
                                  (bc_v16i8 (loadv2i64 addr:$src2)),
                                  (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1),
                                  (bc_v8i16 (loadv2i64 addr:$src2)),
                                  (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
}
//===----------------------------------------------------------------------===//
// VEXTRACTF128 - Extract packed floating-point values
//
let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, u8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, Sched<[WriteFShuffle]>, VEX, VEX_L;
let mayStore = 1 in
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR256:$src1, u8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, Sched<[WriteStore]>, VEX, VEX_L;
}

// AVX1 patterns
let Predicates = [HasAVX] in {
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v4f32 (VEXTRACTF128rr
                    (v8f32 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v2f64 (VEXTRACTF128rr
                    (v4f64 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;

def : Pat<(store (v4f32 (vextract128_extract:$ext (v8f32 VR256:$src1),
                         (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(store (v2f64 (vextract128_extract:$ext (v4f64 VR256:$src1),
                         (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
}

let Predicates = [HasAVX1Only] in {
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v2i64 (VEXTRACTF128rr
                  (v4i64 VR256:$src1),
                  (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v4i32 (VEXTRACTF128rr
                  (v8i32 VR256:$src1),
                  (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v8i16 (VEXTRACTF128rr
                  (v16i16 VR256:$src1),
                  (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v16i8 (VEXTRACTF128rr
                  (v32i8 VR256:$src1),
                  (EXTRACT_get_vextract128_imm VR128:$ext)))>;

def : Pat<(alignedstore (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(alignedstore (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(alignedstore (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(alignedstore (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
}
//===----------------------------------------------------------------------===//
// VMASKMOV - Conditional SIMD Packed Loads and Stores
//
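// The mask is the sign bit of each packed element; memory faults on
// masked-off lanes are suppressed.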
multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
                          Intrinsic IntLd, Intrinsic IntLd256,
                          Intrinsic IntSt, Intrinsic IntSt256> {
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, f128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
             VEX_4V;
  def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, f256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V, VEX_L;
  def mr  : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
}

let ExeDomain = SSEPackedSingle in
defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
                                 int_x86_avx_maskload_ps,
                                 int_x86_avx_maskload_ps_256,
                                 int_x86_avx_maskstore_ps,
                                 int_x86_avx_maskstore_ps_256>;
let ExeDomain = SSEPackedDouble in
defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
                                 int_x86_avx_maskload_pd,
                                 int_x86_avx_maskload_pd_256,
                                 int_x86_avx_maskstore_pd,
                                 int_x86_avx_maskstore_pd_256>;
//===----------------------------------------------------------------------===//
// VPERMIL - Permute Single and Double Floating-Point Values
//
multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
                      RegisterClass RC, X86MemOperand x86memop_f,
                      X86MemOperand x86memop_i, PatFrag i_frag,
                      Intrinsic IntVar, ValueType vt> {
  def rr  : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V,
             Sched<[WriteFShuffle]>;
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop_i:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1,
                             (bitconvert (i_frag addr:$src2))))]>, VEX_4V,
             Sched<[WriteFShuffleLd, ReadAfterLd]>;

  let Predicates = [HasAVX, NoVLX] in {
    def ri  : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, u8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (vt (X86VPermilpi RC:$src1, (i8 imm:$src2))))]>, VEX,
             Sched<[WriteFShuffle]>;
    def mi  : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
             (ins x86memop_f:$src1, u8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst,
               (vt (X86VPermilpi (load addr:$src1), (i8 imm:$src2))))]>, VEX,
             Sched<[WriteFShuffleLd]>;
  }// Predicates = [HasAVX, NoVLX]
}

let ExeDomain = SSEPackedSingle in {
  defm VPERMILPS  : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
                               loadv2i64, int_x86_avx_vpermilvar_ps, v4f32>;
  defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
                               loadv4i64, int_x86_avx_vpermilvar_ps_256, v8f32>, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
  defm VPERMILPD  : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
                               loadv2i64, int_x86_avx_vpermilvar_pd, v2f64>;
  defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
                               loadv4i64, int_x86_avx_vpermilvar_pd_256, v4f64>, VEX_L;
}

let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v8f32 (X86VPermilpv VR256:$src1, (v8i32 VR256:$src2))),
          (VPERMILPSYrr VR256:$src1, VR256:$src2)>;
def : Pat<(v8f32 (X86VPermilpv VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
          (VPERMILPSYrm VR256:$src1, addr:$src2)>;
def : Pat<(v4f64 (X86VPermilpv VR256:$src1, (v4i64 VR256:$src2))),
          (VPERMILPDYrr VR256:$src1, VR256:$src2)>;
def : Pat<(v4f64 (X86VPermilpv VR256:$src1, (loadv4i64 addr:$src2))),
          (VPERMILPDYrm VR256:$src1, addr:$src2)>;

def : Pat<(v8i32 (X86VPermilpi VR256:$src1, (i8 imm:$imm))),
          (VPERMILPSYri VR256:$src1, imm:$imm)>;
def : Pat<(v4i64 (X86VPermilpi VR256:$src1, (i8 imm:$imm))),
          (VPERMILPDYri VR256:$src1, imm:$imm)>;
def : Pat<(v8i32 (X86VPermilpi (bc_v8i32 (loadv4i64 addr:$src1)),
                               (i8 imm:$imm))),
          (VPERMILPSYmi addr:$src1, imm:$imm)>;
def : Pat<(v4i64 (X86VPermilpi (loadv4i64 addr:$src1), (i8 imm:$imm))),
          (VPERMILPDYmi addr:$src1, imm:$imm)>;

def : Pat<(v4f32 (X86VPermilpv VR128:$src1, (v4i32 VR128:$src2))),
          (VPERMILPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4f32 (X86VPermilpv VR128:$src1, (bc_v4i32 (loadv2i64 addr:$src2)))),
          (VPERMILPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v2f64 (X86VPermilpv VR128:$src1, (v2i64 VR128:$src2))),
          (VPERMILPDrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2f64 (X86VPermilpv VR128:$src1, (loadv2i64 addr:$src2))),
          (VPERMILPDrm VR128:$src1, addr:$src2)>;

def : Pat<(v2i64 (X86VPermilpi VR128:$src1, (i8 imm:$imm))),
          (VPERMILPDri VR128:$src1, imm:$imm)>;
def : Pat<(v2i64 (X86VPermilpi (loadv2i64 addr:$src1), (i8 imm:$imm))),
          (VPERMILPDmi addr:$src1, imm:$imm)>;
}
//===----------------------------------------------------------------------===//
// VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
//
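// imm8 bits 1:0 select the source half for the result's low 128 bits
// (0/1 = $src1 low/high, 2/3 = $src2 low/high), bits 5:4 do the same for
// the high 128 bits, and setting bit 3 or bit 7 zeroes that half instead.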
let ExeDomain = SSEPackedSingle in {
def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, u8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (v8f32 (X86VPerm2x128 VR256:$src1, VR256:$src2,
                              (i8 imm:$src3))))]>, VEX_4V, VEX_L,
          Sched<[WriteFShuffle]>;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv8f32 addr:$src2),
                             (i8 imm:$src3)))]>, VEX_4V, VEX_L,
          Sched<[WriteFShuffleLd, ReadAfterLd]>;
}

let Predicates = [HasAVX] in {
def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1,
                  (loadv4f64 addr:$src2), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
}

let Predicates = [HasAVX1Only] in {
def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;

def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1,
                  (bc_v8i32 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1,
                  (loadv4i64 addr:$src2), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1,
                  (bc_v32i8 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
                  (bc_v16i16 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
}
//===----------------------------------------------------------------------===//
// VZERO - Zero YMM registers
//
let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
            YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
  // Zero All YMM registers
  def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
                   [(int_x86_avx_vzeroall)]>, PS, VEX, VEX_L, Requires<[HasAVX]>;

  // Zero Upper bits of YMM registers
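  // (vzeroupper avoids the AVX-to-SSE state-transition penalty when mixing
  // with legacy SSE code.)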
  def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
                     [(int_x86_avx_vzeroupper)]>, PS, VEX, Requires<[HasAVX]>;
}
//===----------------------------------------------------------------------===//
// Half precision conversion instructions
//===----------------------------------------------------------------------===//
multiclass f16c_ph2ps<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
  def rr : I<0x13, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}",
             [(set RC:$dst, (Int VR128:$src))]>,
             T8PD, VEX, Sched<[WriteCvtF2F]>;
  let hasSideEffects = 0, mayLoad = 1 in
  def rm : I<0x13, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8PD, VEX,
             Sched<[WriteCvtF2FLd]>;
}

multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
  def rr : Ii8<0x1D, MRMDestReg, (outs VR128:$dst),
               (ins RC:$src1, i32u8imm:$src2),
               "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               [(set VR128:$dst, (Int RC:$src1, imm:$src2))]>,
               TAPD, VEX, Sched<[WriteCvtF2F]>;
  let hasSideEffects = 0, mayStore = 1,
      SchedRW = [WriteCvtF2FLd, WriteRMW] in
  def mr : Ii8<0x1D, MRMDestMem, (outs),
               (ins x86memop:$dst, RC:$src1, i32u8imm:$src2),
               "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
               TAPD, VEX;
}

let Predicates = [HasF16C] in {
  defm VCVTPH2PS  : f16c_ph2ps<VR128, f64mem, int_x86_vcvtph2ps_128>;
  defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, int_x86_vcvtph2ps_256>, VEX_L;
  defm VCVTPS2PH  : f16c_ps2ph<VR128, f64mem, int_x86_vcvtps2ph_128>;
  defm VCVTPS2PHY : f16c_ps2ph<VR256, f128mem, int_x86_vcvtps2ph_256>, VEX_L;

  // Pattern match vcvtph2ps of a scalar i64 load.
  def : Pat<(int_x86_vcvtph2ps_128 (vzmovl_v2i64 addr:$src)),
            (VCVTPH2PSrm addr:$src)>;
  def : Pat<(int_x86_vcvtph2ps_128 (vzload_v2i64 addr:$src)),
            (VCVTPH2PSrm addr:$src)>;

  def : Pat<(store (f64 (vector_extract (bc_v2f64 (v8i16
                    (int_x86_vcvtps2ph_128 VR128:$src1, i32:$src2))), (iPTR 0))),
                   addr:$dst),
            (VCVTPS2PHmr addr:$dst, VR128:$src1, imm:$src2)>;
  def : Pat<(store (i64 (vector_extract (bc_v2i64 (v8i16
                    (int_x86_vcvtps2ph_128 VR128:$src1, i32:$src2))), (iPTR 0))),
                   addr:$dst),
            (VCVTPS2PHmr addr:$dst, VR128:$src1, imm:$src2)>;
  def : Pat<(store (v8i16 (int_x86_vcvtps2ph_256 VR256:$src1, i32:$src2)),
                   addr:$dst),
            (VCVTPS2PHYmr addr:$dst, VR256:$src1, imm:$src2)>;
}

// Patterns for matching conversions from float to half-float and vice versa.
let Predicates = [HasF16C] in {
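  // The 0 immediate used with VCVTPS2PHrr below selects round-to-nearest-even
  // for the f32 -> f16 conversion.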
  def : Pat<(fp_to_f16 FR32:$src),
            (i16 (EXTRACT_SUBREG (VMOVPDI2DIrr (VCVTPS2PHrr
              (COPY_TO_REGCLASS FR32:$src, VR128), 0)), sub_16bit))>;

  def : Pat<(f16_to_fp GR16:$src),
            (f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
              (COPY_TO_REGCLASS (MOVSX32rr16 GR16:$src), VR128)), FR32))>;

  def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32:$src))),
            (f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
              (VCVTPS2PHrr (COPY_TO_REGCLASS FR32:$src, VR128), 0)), FR32))>;
}

//===----------------------------------------------------------------------===//
// AVX2 Instructions
//===----------------------------------------------------------------------===//

/// AVX2_binop_rmi - AVX2 binary operator with 8-bit immediate
multiclass AVX2_binop_rmi<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                          X86MemOperand x86memop> {
  let isCommutable = 1 in
  def rri : AVX2AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u8imm:$src3),
        !strconcat(OpcodeStr,
          "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2, imm:$src3)))]>,
        Sched<[WriteBlend]>, VEX_4V;
  def rmi : AVX2AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u8imm:$src3),
        !strconcat(OpcodeStr,
          "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        [(set RC:$dst,
          (OpVT (OpNode RC:$src1,
                 (bitconvert (memop_frag addr:$src2)), imm:$src3)))]>,
        Sched<[WriteBlendLd, ReadAfterLd]>, VEX_4V;
}

defm VPBLENDD : AVX2_binop_rmi<0x02, "vpblendd", X86Blendi, v4i32,
                               VR128, loadv2i64, i128mem>;
defm VPBLENDDY : AVX2_binop_rmi<0x02, "vpblendd", X86Blendi, v8i32,
                                VR256, loadv4i64, i256mem>, VEX_L;
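
// Each immediate bit of vpblendd selects one dword lane: 0 takes it from the
// first source, 1 from the second. E.g. "vpblendd $3, %xmm1, %xmm0, %xmm2"
// (AT&T syntax) takes lanes 0-1 from %xmm1 and lanes 2-3 from %xmm0.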

//===----------------------------------------------------------------------===//
// VPBROADCAST - Load from memory and broadcast to all elements of the
//               destination operand

multiclass avx2_broadcast<bits<8> opc, string OpcodeStr,
                          X86MemOperand x86memop, PatFrag ld_frag,
                          ValueType OpVT128, ValueType OpVT256, Predicate prd> {
  let Predicates = [HasAVX2, prd] in {
    def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (OpVT128 (X86VBroadcast (OpVT128 VR128:$src))))]>,
                    Sched<[WriteShuffle]>, VEX;
    def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst), (ins x86memop:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (OpVT128 (X86VBroadcast (ld_frag addr:$src))))]>,
                    Sched<[WriteLoad]>, VEX;
    def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst,
                       (OpVT256 (X86VBroadcast (OpVT128 VR128:$src))))]>,
                     Sched<[WriteShuffle256]>, VEX, VEX_L;
    def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst), (ins x86memop:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst,
                       (OpVT256 (X86VBroadcast (ld_frag addr:$src))))]>,
                     Sched<[WriteLoad]>, VEX, VEX_L;

    // Provide aliases for broadcast from the same register class that
    // automatically do the extract.
    def : Pat<(OpVT256 (X86VBroadcast (OpVT256 VR256:$src))),
              (!cast<Instruction>(NAME#"Yrr")
                (OpVT128 (EXTRACT_SUBREG (OpVT256 VR256:$src), sub_xmm)))>;
  }
}

defm VPBROADCASTB : avx2_broadcast<0x78, "vpbroadcastb", i8mem, loadi8,
                                   v16i8, v32i8, NoVLX_Or_NoBWI>;
defm VPBROADCASTW : avx2_broadcast<0x79, "vpbroadcastw", i16mem, loadi16,
                                   v8i16, v16i16, NoVLX_Or_NoBWI>;
defm VPBROADCASTD : avx2_broadcast<0x58, "vpbroadcastd", i32mem, loadi32,
                                   v4i32, v8i32, NoVLX>;
defm VPBROADCASTQ : avx2_broadcast<0x59, "vpbroadcastq", i64mem, loadi64,
                                   v2i64, v4i64, NoVLX>;
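
// The rm forms above fold the splatted load, so e.g. "vpbroadcastd (%rdi),
// %ymm0" reads one dword and replicates it into all eight lanes; the rr forms
// instead broadcast element 0 of an XMM source register.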

let Predicates = [HasAVX2] in {
  // loadi16 is tricky to fold, because !isTypeDesirableForOp, justifiably.
  // This means we'll encounter truncated i32 loads; match that here.
  def : Pat<(v8i16 (X86VBroadcast (i16 (trunc (i32 (load addr:$src)))))),
            (VPBROADCASTWrm addr:$src)>;
  def : Pat<(v16i16 (X86VBroadcast (i16 (trunc (i32 (load addr:$src)))))),
            (VPBROADCASTWYrm addr:$src)>;
  def : Pat<(v8i16 (X86VBroadcast
              (i16 (trunc (i32 (zextloadi16 addr:$src)))))),
            (VPBROADCASTWrm addr:$src)>;
  def : Pat<(v16i16 (X86VBroadcast
              (i16 (trunc (i32 (zextloadi16 addr:$src)))))),
            (VPBROADCASTWYrm addr:$src)>;
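
  // In other words, a 16-bit splat usually reaches isel as a broadcast of
  // (trunc (i32 load/zextload)); the patterns above strip the truncation so
  // the load still folds into vpbroadcastw.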

  // Provide aliases for broadcast from the same register class that
  // automatically do the extract.
  def : Pat<(v8f32 (X86VBroadcast (v8f32 VR256:$src))),
            (VBROADCASTSSYrr (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src),
                                                    sub_xmm)))>;
  def : Pat<(v4f64 (X86VBroadcast (v4f64 VR256:$src))),
            (VBROADCASTSDYrr (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src),
                                                    sub_xmm)))>;

  // Provide a fallback in case the load node that is used in the patterns
  // above has additional users, which would prevent those patterns from
  // being selected.
  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSYrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
              (VBROADCASTSDYrr (COPY_TO_REGCLASS FR64:$src, VR128))>;

    def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
              (VBROADCASTSSrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
    def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
              (VBROADCASTSSYrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
    def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
              (VBROADCASTSDYrr (COPY_TO_REGCLASS GR64:$src, VR128))>;

    def : Pat<(v16i8 (X86VBroadcast GR8:$src)),
              (VPBROADCASTBrr (COPY_TO_REGCLASS
                               (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
                               VR128))>;
    def : Pat<(v32i8 (X86VBroadcast GR8:$src)),
              (VPBROADCASTBYrr (COPY_TO_REGCLASS
                                (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
                                VR128))>;
    def : Pat<(v8i16 (X86VBroadcast GR16:$src)),
              (VPBROADCASTWrr (COPY_TO_REGCLASS
                               (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
                               VR128))>;
    def : Pat<(v16i16 (X86VBroadcast GR16:$src)),
              (VPBROADCASTWYrr (COPY_TO_REGCLASS
                                (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
                                VR128))>;

    // The patterns for VPBROADCASTD are not needed because they would match
    // the exact same thing as the VBROADCASTSS patterns.
    def : Pat<(v2i64 (X86VBroadcast GR64:$src)),
              (VPBROADCASTQrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
    // The v4i64 pattern is not needed because VBROADCASTSDYrr already matches.
  }
}

// AVX1 broadcast patterns
let Predicates = [HasAVX1Only] in {
  def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
            (VBROADCASTSSYrm addr:$src)>;
  def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
            (VBROADCASTSDYrm addr:$src)>;
  def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
            (VBROADCASTSSrm addr:$src)>;
}
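
// With only AVX1 there is no vpbroadcastd/q, so the integer splats-from-load
// above reuse the floating-point vbroadcastss/vbroadcastsd encodings, which
// replicate the same bit pattern.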

let Predicates = [HasAVX] in {
  // Provide a fallback in case the load node that is used in the patterns
  // above has additional users, which would prevent those patterns from
  // being selected.
  let AddedComplexity = 20 in {
    // 128-bit broadcasts:
    def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
              (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0)>;
    def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), 1)>;
    def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), 1)>;

    def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
              (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0)>;
    def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), 1)>;
    def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), 1)>;
  }

  def : Pat<(v2f64 (X86VBroadcast f64:$src)),
            (VMOVDDUPrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2i64 (X86VBroadcast i64:$src)),
            (VMOVDDUPrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
}
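
// Roughly, without AVX2 a register-to-register broadcast is synthesized:
// vpshufd splats the scalar within an XMM register and vinsertf128 mirrors it
// into the upper half, while vmovddup covers the two-element f64/i64 splats.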

//===----------------------------------------------------------------------===//
// VPERM - Permute instructions

multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                     ValueType OpVT, X86FoldableSchedWrite Sched> {
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
                   (ins VR256:$src1, VR256:$src2),
                   !strconcat(OpcodeStr,
                       "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR256:$dst,
                     (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>,
                   Sched<[Sched]>, VEX_4V, VEX_L;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
                   (ins VR256:$src1, i256mem:$src2),
                   !strconcat(OpcodeStr,
                       "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR256:$dst,
                     (OpVT (X86VPermv VR256:$src1,
                            (bitconvert (mem_frag addr:$src2)))))]>,
                   Sched<[Sched.Folded, ReadAfterLd]>, VEX_4V, VEX_L;
}

defm VPERMD : avx2_perm<0x36, "vpermd", loadv4i64, v8i32, WriteShuffle256>;
let ExeDomain = SSEPackedSingle in
defm VPERMPS : avx2_perm<0x16, "vpermps", loadv8f32, v8f32, WriteFShuffle256>;

multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                         ValueType OpVT, X86FoldableSchedWrite Sched> {
  def Yri : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst),
                     (ins VR256:$src1, u8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>,
                     Sched<[Sched]>, VEX, VEX_L;
  def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
                     (ins i256mem:$src1, u8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermi (mem_frag addr:$src1),
                              (i8 imm:$src2))))]>,
                     Sched<[Sched.Folded, ReadAfterLd]>, VEX, VEX_L;
}

defm VPERMQ : avx2_perm_imm<0x00, "vpermq", loadv4i64, v4i64,
                            WriteShuffle256>, VEX_W;
let ExeDomain = SSEPackedDouble in
defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", loadv4f64, v4f64,
                             WriteFShuffle256>, VEX_W;
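
// The immediate forms encode one 2-bit source index per destination qword, so
// e.g. "vpermq $0x1b, %ymm0, %ymm1" (0x1b = 0b00011011) reverses the four
// 64-bit elements; vpermd/vpermps above take per-lane indices from a register
// instead.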

//===----------------------------------------------------------------------===//
// VPERM2I128 - Permute Integer Values in 128-bit chunks

def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, u8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2,
                            (i8 imm:$src3))))]>, Sched<[WriteShuffle256]>,
          VEX_4V, VEX_L;
def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv4i64 addr:$src2),
                            (i8 imm:$src3)))]>,
          Sched<[WriteShuffle256Ld, ReadAfterLd]>, VEX_4V, VEX_L;

let Predicates = [HasAVX2] in {
  def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;

  def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, (bc_v32i8 (loadv4i64 addr:$src2)),
                    (i8 imm:$imm))),
            (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
                     (bc_v16i16 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
            (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)),
                    (i8 imm:$imm))),
            (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
}
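
// Each immediate nibble selects one 128-bit half of the concatenated sources
// (bit 3 zeroes the half), so e.g. "vperm2i128 $0x21, %ymm1, %ymm0, %ymm2"
// (AT&T syntax) produces the high half of %ymm0 followed by the low half of
// %ymm1.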

//===----------------------------------------------------------------------===//
// VINSERTI128 - Insert packed integer values

let hasSideEffects = 0 in {
def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, u8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteShuffle256]>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, i128mem:$src2, u8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteShuffle256Ld, ReadAfterLd]>, VEX_4V, VEX_L;
}

let Predicates = [HasAVX2, NoVLX] in {
  def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                    (iPTR imm)),
            (VINSERTI128rr VR256:$src1, VR128:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                    (iPTR imm)),
            (VINSERTI128rr VR256:$src1, VR128:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                    (iPTR imm)),
            (VINSERTI128rr VR256:$src1, VR128:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                    (iPTR imm)),
            (VINSERTI128rr VR256:$src1, VR128:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;

  def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
                                    (iPTR imm)),
            (VINSERTI128rm VR256:$src1, addr:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1),
                                    (bc_v4i32 (loadv2i64 addr:$src2)),
                                    (iPTR imm)),
            (VINSERTI128rm VR256:$src1, addr:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1),
                                    (bc_v16i8 (loadv2i64 addr:$src2)),
                                    (iPTR imm)),
            (VINSERTI128rm VR256:$src1, addr:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1),
                                    (bc_v8i16 (loadv2i64 addr:$src2)),
                                    (iPTR imm)),
            (VINSERTI128rm VR256:$src1, addr:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
}
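
// INSERT_get_vinsert128_imm converts the subvector index of the insert node
// into the 0/1 lane immediate, so inserting into the upper half becomes e.g.
// "vinserti128 $1, %xmm1, %ymm0, %ymm0", with the rm patterns folding a
// 128-bit load as the inserted half.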

//===----------------------------------------------------------------------===//
// VEXTRACTI128 - Extract packed integer values

def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, u8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
          Sched<[WriteShuffle256]>, VEX, VEX_L;
let hasSideEffects = 0, mayStore = 1 in
def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
          (ins i128mem:$dst, VR256:$src1, u8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
          Sched<[WriteStore]>, VEX, VEX_L;

let Predicates = [HasAVX2] in {
  def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
            (v2i64 (VEXTRACTI128rr
                    (v4i64 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
  def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
            (v4i32 (VEXTRACTI128rr
                    (v8i32 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
  def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
            (v8i16 (VEXTRACTI128rr
                    (v16i16 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
  def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
            (v16i8 (VEXTRACTI128rr
                    (v32i8 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;

  def : Pat<(store (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
                           (iPTR imm))), addr:$dst),
            (VEXTRACTI128mr addr:$dst, VR256:$src1,
                            (EXTRACT_get_vextract128_imm VR128:$ext))>;
  def : Pat<(store (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
                           (iPTR imm))), addr:$dst),
            (VEXTRACTI128mr addr:$dst, VR256:$src1,
                            (EXTRACT_get_vextract128_imm VR128:$ext))>;
  def : Pat<(store (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
                           (iPTR imm))), addr:$dst),
            (VEXTRACTI128mr addr:$dst, VR256:$src1,
                            (EXTRACT_get_vextract128_imm VR128:$ext))>;
  def : Pat<(store (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
                           (iPTR imm))), addr:$dst),
            (VEXTRACTI128mr addr:$dst, VR256:$src1,
                            (EXTRACT_get_vextract128_imm VR128:$ext))>;
}
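
// The store patterns fold "extract half, then store", so e.g.
// "vextracti128 $1, %ymm0, %xmm1" followed by a 128-bit store becomes the
// single instruction "vextracti128 $1, %ymm0, (%rdi)".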

//===----------------------------------------------------------------------===//
// VPMASKMOV - Conditional SIMD Integer Packed Loads and Stores

multiclass avx2_pmovmask<string OpcodeStr,
                         Intrinsic IntLd128, Intrinsic IntLd256,
                         Intrinsic IntSt128, Intrinsic IntSt256> {
  def rm  : AVX28I<0x8c, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd128 addr:$src2, VR128:$src1))]>, VEX_4V;
  def Yrm : AVX28I<0x8c, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V, VEX_L;
  def mr  : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt128 addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
}

defm VPMASKMOVD : avx2_pmovmask<"vpmaskmovd",
                                int_x86_avx2_maskload_d,
                                int_x86_avx2_maskload_d_256,
                                int_x86_avx2_maskstore_d,
                                int_x86_avx2_maskstore_d_256>;
defm VPMASKMOVQ : avx2_pmovmask<"vpmaskmovq",
                                int_x86_avx2_maskload_q,
                                int_x86_avx2_maskload_q_256,
                                int_x86_avx2_maskstore_q,
                                int_x86_avx2_maskstore_q_256>, VEX_W;

def: Pat<(X86mstore addr:$ptr, (v8i32 VR256:$mask), (v8f32 VR256:$src)),
         (VMASKMOVPSYmr addr:$ptr, VR256:$mask, VR256:$src)>;

def: Pat<(X86mstore addr:$ptr, (v8i32 VR256:$mask), (v8i32 VR256:$src)),
         (VPMASKMOVDYmr addr:$ptr, VR256:$mask, VR256:$src)>;

def: Pat<(X86mstore addr:$ptr, (v4i32 VR128:$mask), (v4f32 VR128:$src)),
         (VMASKMOVPSmr addr:$ptr, VR128:$mask, VR128:$src)>;

def: Pat<(X86mstore addr:$ptr, (v4i32 VR128:$mask), (v4i32 VR128:$src)),
         (VPMASKMOVDmr addr:$ptr, VR128:$mask, VR128:$src)>;

def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask), undef)),
         (VMASKMOVPSYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask),
                 (bc_v8f32 (v8i32 immAllZerosV)))),
         (VMASKMOVPSYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8f32 VR256:$src0))),
         (VBLENDVPSYrr VR256:$src0, (VMASKMOVPSYrm VR256:$mask, addr:$ptr),
                       VR256:$mask)>;

def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), undef)),
         (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8i32 immAllZerosV))),
         (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8i32 VR256:$src0))),
         (VBLENDVPSYrr VR256:$src0, (VPMASKMOVDYrm VR256:$mask, addr:$ptr),
                       VR256:$mask)>;

def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask), undef)),
         (VMASKMOVPSrm VR128:$mask, addr:$ptr)>;

def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask),
                 (bc_v4f32 (v4i32 immAllZerosV)))),
         (VMASKMOVPSrm VR128:$mask, addr:$ptr)>;

def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4f32 VR128:$src0))),
         (VBLENDVPSrr VR128:$src0, (VMASKMOVPSrm VR128:$mask, addr:$ptr),
                      VR128:$mask)>;

def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), undef)),
         (VPMASKMOVDrm VR128:$mask, addr:$ptr)>;

def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4i32 immAllZerosV))),
         (VPMASKMOVDrm VR128:$mask, addr:$ptr)>;

def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4i32 VR128:$src0))),
         (VBLENDVPSrr VR128:$src0, (VPMASKMOVDrm VR128:$mask, addr:$ptr),
                      VR128:$mask)>;

def: Pat<(X86mstore addr:$ptr, (v4i64 VR256:$mask), (v4f64 VR256:$src)),
         (VMASKMOVPDYmr addr:$ptr, VR256:$mask, VR256:$src)>;

def: Pat<(X86mstore addr:$ptr, (v4i64 VR256:$mask), (v4i64 VR256:$src)),
         (VPMASKMOVQYmr addr:$ptr, VR256:$mask, VR256:$src)>;

def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask), undef)),
         (VMASKMOVPDYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask),
                 (v4f64 immAllZerosV))),
         (VMASKMOVPDYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask), (v4f64 VR256:$src0))),
         (VBLENDVPDYrr VR256:$src0, (VMASKMOVPDYrm VR256:$mask, addr:$ptr),
                       VR256:$mask)>;

def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask), undef)),
         (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask),
                 (bc_v4i64 (v8i32 immAllZerosV)))),
         (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask), (v4i64 VR256:$src0))),
         (VBLENDVPDYrr VR256:$src0, (VPMASKMOVQYrm VR256:$mask, addr:$ptr),
                       VR256:$mask)>;

def: Pat<(X86mstore addr:$ptr, (v2i64 VR128:$mask), (v2f64 VR128:$src)),
         (VMASKMOVPDmr addr:$ptr, VR128:$mask, VR128:$src)>;

def: Pat<(X86mstore addr:$ptr, (v2i64 VR128:$mask), (v2i64 VR128:$src)),
         (VPMASKMOVQmr addr:$ptr, VR128:$mask, VR128:$src)>;

def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask), undef)),
         (VMASKMOVPDrm VR128:$mask, addr:$ptr)>;

def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask),
                 (v2f64 immAllZerosV))),
         (VMASKMOVPDrm VR128:$mask, addr:$ptr)>;

def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask), (v2f64 VR128:$src0))),
         (VBLENDVPDrr VR128:$src0, (VMASKMOVPDrm VR128:$mask, addr:$ptr),
                      VR128:$mask)>;

def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask), undef)),
         (VPMASKMOVQrm VR128:$mask, addr:$ptr)>;

def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask),
                 (bc_v2i64 (v4i32 immAllZerosV)))),
         (VPMASKMOVQrm VR128:$mask, addr:$ptr)>;

def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask), (v2i64 VR128:$src0))),
         (VBLENDVPDrr VR128:$src0, (VPMASKMOVQrm VR128:$mask, addr:$ptr),
                      VR128:$mask)>;
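
// Masked loads whose passthru is undef or zero map to a bare vmaskmov /
// vpmaskmov, since the hardware already zeroes disabled lanes; only a
// non-trivial passthru needs the extra vblendv, which reuses the same mask to
// merge the loaded lanes over $src0.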

//===----------------------------------------------------------------------===//
// Variable Bit Shifts

multiclass avx2_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType vt128, ValueType vt256> {
  def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst),
             (ins VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1, (vt128 VR128:$src2))))]>,
             VEX_4V, Sched<[WriteVarVecShift]>;
  def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1,
                       (vt128 (bitconvert (loadv2i64 addr:$src2))))))]>,
             VEX_4V, Sched<[WriteVarVecShiftLd, ReadAfterLd]>;
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
             (ins VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1, (vt256 VR256:$src2))))]>,
             VEX_4V, VEX_L, Sched<[WriteVarVecShift]>;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1,
                       (vt256 (bitconvert (loadv4i64 addr:$src2))))))]>,
             VEX_4V, VEX_L, Sched<[WriteVarVecShiftLd, ReadAfterLd]>;
}

defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, v4i32, v8i32>;
defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", shl, v2i64, v4i64>, VEX_W;
defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, v4i32, v8i32>;
defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, v2i64, v4i64>, VEX_W;
defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, v4i32, v8i32>;
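
// These map vector shl/srl/sra with per-element shift amounts, e.g.
// "vpsllvd %ymm1, %ymm0, %ymm2" shifts each dword of %ymm0 left by the count
// in the matching dword of %ymm1. There is no vpsravq before AVX-512, hence
// no v2i64/v4i64 sra entry here.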

//===----------------------------------------------------------------------===//
// VGATHER - GATHER Operations
multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,
                       X86MemOperand memop128, X86MemOperand memop256> {
  def rm  : AVX28I<opc, MRMSrcMem, (outs VR128:$dst, VR128:$mask_wb),
            (ins VR128:$src1, memop128:$src2, VR128:$mask),
            !strconcat(OpcodeStr,
              "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            []>, VEX_4VOp3;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs RC256:$dst, RC256:$mask_wb),
            (ins RC256:$src1, memop256:$src2, RC256:$mask),
            !strconcat(OpcodeStr,
              "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            []>, VEX_4VOp3, VEX_L;
}

let mayLoad = 1, Constraints
  = "@earlyclobber $dst,@earlyclobber $mask_wb, $src1 = $dst, $mask = $mask_wb"
  in {
  defm VPGATHERDQ : avx2_gather<0x90, "vpgatherdq", VR256, vx64mem, vx64mem>, VEX_W;
  defm VPGATHERQQ : avx2_gather<0x91, "vpgatherqq", VR256, vx64mem, vy64mem>, VEX_W;
  defm VPGATHERDD : avx2_gather<0x90, "vpgatherdd", VR256, vx32mem, vy32mem>;
  defm VPGATHERQD : avx2_gather<0x91, "vpgatherqd", VR128, vx32mem, vy32mem>;
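
  // The $src1 = $dst and $mask = $mask_wb ties model gather semantics: the
  // instruction merges into its destination and consumes the mask, e.g.
  // "vpgatherdd %ymm2, (%rdi,%ymm1,4), %ymm0" loads each dword whose mask
  // lane's sign bit is set and leaves %ymm2 zeroed on normal completion.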

  let ExeDomain = SSEPackedDouble in {
    defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx64mem, vx64mem>, VEX_W;
    defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx64mem, vy64mem>, VEX_W;
  }

  let ExeDomain = SSEPackedSingle in {
    defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx32mem, vy32mem>;
    defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", VR128, vx32mem, vy32mem>;