1 //===-- X86InstrSSE.td - SSE Instruction Set ---------------*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
16 class OpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm> {
17 InstrItinClass rr = arg_rr;
18 InstrItinClass rm = arg_rm;
19 // InstrSchedModel info.
20 X86FoldableSchedWrite Sched = WriteFAdd;
23 class SizeItins<OpndItins arg_s, OpndItins arg_d> {
29 class ShiftOpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm,
30 InstrItinClass arg_ri> {
31 InstrItinClass rr = arg_rr;
32 InstrItinClass rm = arg_rm;
33 InstrItinClass ri = arg_ri;
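// Note: these itinerary carrier classes only bundle the per-form itinerary
// classes with a SchedWrite for the newer machine model. As an illustrative
// sketch (not a definition used elsewhere in this file), a record such as
//   def EXAMPLE_ITINS : OpndItins<IIC_SSE_ALU_F32S_RR, IIC_SSE_ALU_F32S_RM>;
// would carry rr = IIC_SSE_ALU_F32S_RR, rm = IIC_SSE_ALU_F32S_RM, and the
// default Sched = WriteFAdd unless an enclosing 'let Sched = ...' overrides it.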
38 let Sched = WriteFAdd in {
39 def SSE_ALU_F32S : OpndItins<
40 IIC_SSE_ALU_F32S_RR, IIC_SSE_ALU_F32S_RM
43 def SSE_ALU_F64S : OpndItins<
44 IIC_SSE_ALU_F64S_RR, IIC_SSE_ALU_F64S_RM
48 def SSE_ALU_ITINS_S : SizeItins<
49 SSE_ALU_F32S, SSE_ALU_F64S
52 let Sched = WriteFMul in {
53 def SSE_MUL_F32S : OpndItins<
54 IIC_SSE_MUL_F32S_RR, IIC_SSE_MUL_F32S_RM
57 def SSE_MUL_F64S : OpndItins<
58 IIC_SSE_MUL_F64S_RR, IIC_SSE_MUL_F64S_RM
62 def SSE_MUL_ITINS_S : SizeItins<
63 SSE_MUL_F32S, SSE_MUL_F64S
66 let Sched = WriteFDiv in {
67 def SSE_DIV_F32S : OpndItins<
68 IIC_SSE_DIV_F32S_RR, IIC_SSE_DIV_F32S_RM
71 def SSE_DIV_F64S : OpndItins<
72 IIC_SSE_DIV_F64S_RR, IIC_SSE_DIV_F64S_RM
76 def SSE_DIV_ITINS_S : SizeItins<
77 SSE_DIV_F32S, SSE_DIV_F64S
81 let Sched = WriteFAdd in {
82 def SSE_ALU_F32P : OpndItins<
83 IIC_SSE_ALU_F32P_RR, IIC_SSE_ALU_F32P_RM
86 def SSE_ALU_F64P : OpndItins<
87 IIC_SSE_ALU_F64P_RR, IIC_SSE_ALU_F64P_RM
91 def SSE_ALU_ITINS_P : SizeItins<
92 SSE_ALU_F32P, SSE_ALU_F64P
95 let Sched = WriteFMul in {
96 def SSE_MUL_F32P : OpndItins<
97 IIC_SSE_MUL_F32P_RR, IIC_SSE_MUL_F32P_RM
100 def SSE_MUL_F64P : OpndItins<
101 IIC_SSE_MUL_F64P_RR, IIC_SSE_MUL_F64P_RM
105 def SSE_MUL_ITINS_P : SizeItins<
106 SSE_MUL_F32P, SSE_MUL_F64P
109 let Sched = WriteFDiv in {
110 def SSE_DIV_F32P : OpndItins<
111 IIC_SSE_DIV_F32P_RR, IIC_SSE_DIV_F32P_RM
114 def SSE_DIV_F64P : OpndItins<
115 IIC_SSE_DIV_F64P_RR, IIC_SSE_DIV_F64P_RM
119 def SSE_DIV_ITINS_P : SizeItins<
120 SSE_DIV_F32P, SSE_DIV_F64P
123 let Sched = WriteVecLogic in
124 def SSE_VEC_BIT_ITINS_P : OpndItins<
125 IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
128 def SSE_BIT_ITINS_P : OpndItins<
129 IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
132 let Sched = WriteVecALU in {
133 def SSE_INTALU_ITINS_P : OpndItins<
134 IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
137 def SSE_INTALUQ_ITINS_P : OpndItins<
138 IIC_SSE_INTALUQ_P_RR, IIC_SSE_INTALUQ_P_RM
142 let Sched = WriteVecIMul in
143 def SSE_INTMUL_ITINS_P : OpndItins<
144 IIC_SSE_INTMUL_P_RR, IIC_SSE_INTMUL_P_RM
147 def SSE_INTSHIFT_ITINS_P : ShiftOpndItins<
148 IIC_SSE_INTSH_P_RR, IIC_SSE_INTSH_P_RM, IIC_SSE_INTSH_P_RI
151 def SSE_MOVA_ITINS : OpndItins<
152 IIC_SSE_MOVA_P_RR, IIC_SSE_MOVA_P_RM
155 def SSE_MOVU_ITINS : OpndItins<
156 IIC_SSE_MOVU_P_RR, IIC_SSE_MOVU_P_RM
159 def SSE_DPPD_ITINS : OpndItins<
160 IIC_SSE_DPPD_RR, IIC_SSE_DPPD_RM
163 def SSE_DPPS_ITINS : OpndItins<
164 IIC_SSE_DPPS_RR, IIC_SSE_DPPS_RM
167 def DEFAULT_ITINS : OpndItins<
168 IIC_ALU_NONMEM, IIC_ALU_MEM
171 def SSE_EXTRACT_ITINS : OpndItins<
172 IIC_SSE_EXTRACTPS_RR, IIC_SSE_EXTRACTPS_RM
175 def SSE_INSERT_ITINS : OpndItins<
176 IIC_SSE_INSERTPS_RR, IIC_SSE_INSERTPS_RM
179 let Sched = WriteMPSAD in
180 def SSE_MPSADBW_ITINS : OpndItins<
181 IIC_SSE_MPSADBW_RR, IIC_SSE_MPSADBW_RM
184 let Sched = WriteVecIMul in
185 def SSE_PMULLD_ITINS : OpndItins<
186 IIC_SSE_PMULLD_RR, IIC_SSE_PMULLD_RM
189 // Definitions for backward compatibility.
190 // The instructions mapped onto these definitions use a different itinerary
191 // than the actual scheduling model.
192 let Sched = WriteShuffle in
193 def DEFAULT_ITINS_SHUFFLESCHED : OpndItins<
194 IIC_ALU_NONMEM, IIC_ALU_MEM
197 let Sched = WriteVecIMul in
198 def DEFAULT_ITINS_VECIMULSCHED : OpndItins<
199 IIC_ALU_NONMEM, IIC_ALU_MEM
202 let Sched = WriteShuffle in
203 def SSE_INTALU_ITINS_SHUFF_P : OpndItins<
204 IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
207 let Sched = WriteMPSAD in
208 def DEFAULT_ITINS_MPSADSCHED : OpndItins<
209 IIC_ALU_NONMEM, IIC_ALU_MEM
212 let Sched = WriteFBlend in
213 def DEFAULT_ITINS_FBLENDSCHED : OpndItins<
214 IIC_ALU_NONMEM, IIC_ALU_MEM
217 let Sched = WriteBlend in
218 def DEFAULT_ITINS_BLENDSCHED : OpndItins<
219 IIC_ALU_NONMEM, IIC_ALU_MEM
222 let Sched = WriteVarBlend in
223 def DEFAULT_ITINS_VARBLENDSCHED : OpndItins<
224 IIC_ALU_NONMEM, IIC_ALU_MEM
227 let Sched = WriteFBlend in
228 def SSE_INTALU_ITINS_FBLEND_P : OpndItins<
229 IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
232 let Sched = WriteBlend in
233 def SSE_INTALU_ITINS_BLEND_P : OpndItins<
234 IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
237 //===----------------------------------------------------------------------===//
238 // SSE 1 & 2 Instructions Classes
239 //===----------------------------------------------------------------------===//
241 /// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
242 multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
243 RegisterClass RC, X86MemOperand x86memop,
246 let isCommutable = 1 in {
247 def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
249 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
250 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
251 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))], itins.rr>,
252 Sched<[itins.Sched]>;
254 def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
256 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
257 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
258 [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))], itins.rm>,
259 Sched<[itins.Sched.Folded, ReadAfterLd]>;
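// As a rough sketch of how this multiclass is used (operand values here are
// hypothetical): a 'defm ADDSS : sse12_fp_scalar<0x58, "addss", fadd, FR32,
// f32mem, ...>' instantiation would expand into ADDSSrr (register-register,
// marked commutable) and ADDSSrm (the load-folded form, scheduled on
// itins.Sched.Folded with ReadAfterLd for the register operand).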
262 /// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
263 multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
264 string asm, string SSEVer, string FPSizeStr,
265 Operand memopr, ComplexPattern mem_cpat,
268 let isCodeGenOnly = 1 in {
269 def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
271 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
272 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
273 [(set RC:$dst, (!cast<Intrinsic>(
274 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
275 RC:$src1, RC:$src2))], itins.rr>,
276 Sched<[itins.Sched]>;
277 def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
279 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
280 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
281 [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
282 SSEVer, "_", OpcodeStr, FPSizeStr))
283 RC:$src1, mem_cpat:$src2))], itins.rm>,
284 Sched<[itins.Sched.Folded, ReadAfterLd]>;
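// The intrinsic name is assembled with !strconcat; for example (parameter
// values chosen purely for illustration), SSEVer = "", OpcodeStr = "add" and
// FPSizeStr = "_ss" would resolve the !cast to the int_x86_sse_add_ss
// intrinsic, while SSEVer = "2" with FPSizeStr = "_sd" would pick
// int_x86_sse2_add_sd.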
288 /// sse12_fp_packed - SSE 1 & 2 packed instructions class
289 multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
290 RegisterClass RC, ValueType vt,
291 X86MemOperand x86memop, PatFrag mem_frag,
292 Domain d, OpndItins itins, bit Is2Addr = 1> {
293 let isCommutable = 1 in
294 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
296 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
297 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
298 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>,
299 Sched<[itins.Sched]>;
301 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
303 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
304 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
305 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
307 Sched<[itins.Sched.Folded, ReadAfterLd]>;
310 /// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
311 multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
312 string OpcodeStr, X86MemOperand x86memop,
313 list<dag> pat_rr, list<dag> pat_rm,
315 let isCommutable = 1, hasSideEffects = 0 in
316 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
318 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
319 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
320 pat_rr, NoItinerary, d>,
321 Sched<[WriteVecLogic]>;
322 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
324 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
325 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
326 pat_rm, NoItinerary, d>,
327 Sched<[WriteVecLogicLd, ReadAfterLd]>;
330 //===----------------------------------------------------------------------===//
331 // Non-instruction patterns
332 //===----------------------------------------------------------------------===//
334 // A vector extract of the first f32/f64 position is a subregister copy
335 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
336 (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32)>;
337 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
338 (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64)>;
340 // A 128-bit subvector extract from the first 256-bit vector position
341 // is a subregister copy that needs no instruction.
342 def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (iPTR 0))),
343 (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
344 def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (iPTR 0))),
345 (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;
347 def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (iPTR 0))),
348 (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
349 def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (iPTR 0))),
350 (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;
352 def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (iPTR 0))),
353 (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
354 def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (iPTR 0))),
355 (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;
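// For example, extracting the low v4f32 half of a value living in %ymm0 is
// just a copy of %xmm0; no vextractf128 is emitted for lane 0, and the copy
// is usually coalesced away entirely.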
357 // A 128-bit subvector insert to the first 256-bit vector position
358 // is a subregister copy that needs no instruction.
359 let AddedComplexity = 25 in { // to give priority over vinsertf128rm
360 def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)),
361 (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
362 def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)),
363 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
364 def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)),
365 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
366 def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)),
367 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
368 def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (iPTR 0)),
369 (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
370 def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (iPTR 0)),
371 (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
374 // Implicitly promote a 32-bit scalar to a vector.
375 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
376 (COPY_TO_REGCLASS FR32:$src, VR128)>;
377 def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
378 (COPY_TO_REGCLASS FR32:$src, VR128)>;
379 // Implicitly promote a 64-bit scalar to a vector.
380 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
381 (COPY_TO_REGCLASS FR64:$src, VR128)>;
382 def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
383 (COPY_TO_REGCLASS FR64:$src, VR128)>;
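// For example, (v4f32 (scalar_to_vector FR32:$src)) needs no instruction:
// the scalar already lives in the low lane of an XMM register, so the pattern
// simply reinterprets it in the VR128 register class, leaving the upper lanes
// undefined (which scalar_to_vector permits).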
385 // Bitcasts between 128-bit vector types. Return the original type since
386 // no instruction is needed for the conversion
387 let Predicates = [HasSSE2] in {
388 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
389 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
390 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
391 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
392 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
393 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
394 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
395 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
396 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
397 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
398 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
399 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
400 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
401 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
402 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
403 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
404 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
405 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
406 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
407 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
408 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
409 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
410 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
411 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
412 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
413 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
414 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
415 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
416 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
417 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
420 // Bitcasts between 256-bit vector types. Return the original type since
421 // no instruction is needed for the conversion
422 let Predicates = [HasAVX] in {
423 def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
424 def : Pat<(v4f64 (bitconvert (v8i32 VR256:$src))), (v4f64 VR256:$src)>;
425 def : Pat<(v4f64 (bitconvert (v4i64 VR256:$src))), (v4f64 VR256:$src)>;
426 def : Pat<(v4f64 (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
427 def : Pat<(v4f64 (bitconvert (v32i8 VR256:$src))), (v4f64 VR256:$src)>;
428 def : Pat<(v8f32 (bitconvert (v8i32 VR256:$src))), (v8f32 VR256:$src)>;
429 def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
430 def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
431 def : Pat<(v8f32 (bitconvert (v32i8 VR256:$src))), (v8f32 VR256:$src)>;
432 def : Pat<(v8f32 (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
433 def : Pat<(v4i64 (bitconvert (v8f32 VR256:$src))), (v4i64 VR256:$src)>;
434 def : Pat<(v4i64 (bitconvert (v8i32 VR256:$src))), (v4i64 VR256:$src)>;
435 def : Pat<(v4i64 (bitconvert (v4f64 VR256:$src))), (v4i64 VR256:$src)>;
436 def : Pat<(v4i64 (bitconvert (v32i8 VR256:$src))), (v4i64 VR256:$src)>;
437 def : Pat<(v4i64 (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
438 def : Pat<(v32i8 (bitconvert (v4f64 VR256:$src))), (v32i8 VR256:$src)>;
439 def : Pat<(v32i8 (bitconvert (v4i64 VR256:$src))), (v32i8 VR256:$src)>;
440 def : Pat<(v32i8 (bitconvert (v8f32 VR256:$src))), (v32i8 VR256:$src)>;
441 def : Pat<(v32i8 (bitconvert (v8i32 VR256:$src))), (v32i8 VR256:$src)>;
442 def : Pat<(v32i8 (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
443 def : Pat<(v8i32 (bitconvert (v32i8 VR256:$src))), (v8i32 VR256:$src)>;
444 def : Pat<(v8i32 (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
445 def : Pat<(v8i32 (bitconvert (v8f32 VR256:$src))), (v8i32 VR256:$src)>;
446 def : Pat<(v8i32 (bitconvert (v4i64 VR256:$src))), (v8i32 VR256:$src)>;
447 def : Pat<(v8i32 (bitconvert (v4f64 VR256:$src))), (v8i32 VR256:$src)>;
448 def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))), (v16i16 VR256:$src)>;
449 def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))), (v16i16 VR256:$src)>;
450 def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))), (v16i16 VR256:$src)>;
451 def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))), (v16i16 VR256:$src)>;
452 def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))), (v16i16 VR256:$src)>;
455 // Alias instructions that map fld0 to xorps for SSE or vxorps for AVX.
456 // This is expanded by ExpandPostRAPseudos.
457 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
458 isPseudo = 1, SchedRW = [WriteZero] in {
459 def FsFLD0SS : I<0, Pseudo, (outs FR32:$dst), (ins), "",
460 [(set FR32:$dst, fp32imm0)]>, Requires<[HasSSE1]>;
461 def FsFLD0SD : I<0, Pseudo, (outs FR64:$dst), (ins), "",
462 [(set FR64:$dst, fpimm0)]>, Requires<[HasSSE2]>;
465 //===----------------------------------------------------------------------===//
466 // AVX & SSE - Zero/One Vectors
467 //===----------------------------------------------------------------------===//
469 // Alias instruction that maps zero vector to pxor / xorp* for SSE.
470 // This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
471 // swizzled by ExecutionDepsFix to pxor.
472 // We set canFoldAsLoad because this can be converted to a constant-pool
473 // load of an all-zeros value if folding it would be beneficial.
474 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
475 isPseudo = 1, SchedRW = [WriteZero] in {
476 def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
477 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
480 def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
481 def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
482 def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
483 def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
484 def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
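// After ExpandPostRAPseudos, V_SET0 becomes an idiomatic zeroing XOR
// (e.g. 'xorps %xmm0, %xmm0', or 'vxorps' when AVX is available), which is
// why a single pseudo can serve every 128-bit zero-vector type above.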
487 // The same as done above but for AVX. The 256-bit AVX1 ISA doesn't support PI,
488 // and doesn't need it because on Sandy Bridge the register is set to zero
489 // at the rename stage without using any execution unit, so SET0PSY
490 // and SET0PDY can be used for vector int instructions without penalty.
491 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
492 isPseudo = 1, Predicates = [HasAVX], SchedRW = [WriteZero] in {
493 def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "",
494 [(set VR256:$dst, (v8f32 immAllZerosV))]>;
497 let Predicates = [HasAVX] in
498 def : Pat<(v4f64 immAllZerosV), (AVX_SET0)>;
500 let Predicates = [HasAVX2] in {
501 def : Pat<(v4i64 immAllZerosV), (AVX_SET0)>;
502 def : Pat<(v8i32 immAllZerosV), (AVX_SET0)>;
503 def : Pat<(v16i16 immAllZerosV), (AVX_SET0)>;
504 def : Pat<(v32i8 immAllZerosV), (AVX_SET0)>;
507 // AVX1 has no support for 256-bit integer instructions, but since the 128-bit
508 // VPXOR instruction writes zero to its upper part, it's safe to build zeros.
509 let Predicates = [HasAVX1Only] in {
510 def : Pat<(v32i8 immAllZerosV), (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
511 def : Pat<(bc_v32i8 (v8f32 immAllZerosV)),
512 (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
514 def : Pat<(v16i16 immAllZerosV), (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;
515 def : Pat<(bc_v16i16 (v8f32 immAllZerosV)),
516 (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;
518 def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;
519 def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
520 (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;
522 def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
523 def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
524 (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
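// This works because a VEX-encoded 128-bit instruction writing an XMM register
// zeroes bits [255:128] of the containing YMM register, so zeroing the low
// half via V_SET0 and wrapping it in SUBREG_TO_REG yields a full 256-bit zero.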
527 // We set canFoldAsLoad because this can be converted to a constant-pool
528 // load of an all-ones value if folding it would be beneficial.
529 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
530 isPseudo = 1, SchedRW = [WriteZero] in {
531 def V_SETALLONES : I<0, Pseudo, (outs VR128:$dst), (ins), "",
532 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
533 let Predicates = [HasAVX2] in
534 def AVX2_SETALLONES : I<0, Pseudo, (outs VR256:$dst), (ins), "",
535 [(set VR256:$dst, (v8i32 immAllOnesV))]>;
539 //===----------------------------------------------------------------------===//
540 // SSE 1 & 2 - Move FP Scalar Instructions
542 // Move Instructions. Register-to-register movss/movsd is not used for FR32/64
543 // register copies because it's a partial register update; register-to-register
544 // movss/movsd is not modeled as an INSERT_SUBREG because INSERT_SUBREG requires
545 // that the insert be implementable in terms of a copy, and, as just mentioned,
546 // we don't use movss/movsd for copies.
547 //===----------------------------------------------------------------------===//
549 multiclass sse12_move_rr<RegisterClass RC, SDNode OpNode, ValueType vt,
550 X86MemOperand x86memop, string base_opc,
551 string asm_opr, Domain d = GenericDomain> {
552 def rr : SI<0x10, MRMSrcReg, (outs VR128:$dst),
553 (ins VR128:$src1, RC:$src2),
554 !strconcat(base_opc, asm_opr),
555 [(set VR128:$dst, (vt (OpNode VR128:$src1,
556 (scalar_to_vector RC:$src2))))],
557 IIC_SSE_MOV_S_RR, d>, Sched<[WriteFShuffle]>;
559 // For the disassembler
560 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
561 def rr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
562 (ins VR128:$src1, RC:$src2),
563 !strconcat(base_opc, asm_opr),
564 [], IIC_SSE_MOV_S_RR>, Sched<[WriteFShuffle]>;
567 multiclass sse12_move<RegisterClass RC, SDNode OpNode, ValueType vt,
568 X86MemOperand x86memop, string OpcodeStr,
569 Domain d = GenericDomain> {
571 defm V#NAME : sse12_move_rr<RC, OpNode, vt, x86memop, OpcodeStr,
572 "\t{$src2, $src1, $dst|$dst, $src1, $src2}", d>,
575 def V#NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
576 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
577 [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR, d>,
578 VEX, VEX_LIG, Sched<[WriteStore]>;
580 let Constraints = "$src1 = $dst" in {
581 defm NAME : sse12_move_rr<RC, OpNode, vt, x86memop, OpcodeStr,
582 "\t{$src2, $dst|$dst, $src2}", d>;
585 def NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
586 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
587 [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR, d>,
591 // Loading from memory automatically zeroes the upper bits.
592 multiclass sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
593 PatFrag mem_pat, string OpcodeStr,
594 Domain d = GenericDomain> {
595 def V#NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
596 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
597 [(set RC:$dst, (mem_pat addr:$src))],
598 IIC_SSE_MOV_S_RM, d>, VEX, VEX_LIG, Sched<[WriteLoad]>;
599 def NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
600 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
601 [(set RC:$dst, (mem_pat addr:$src))],
602 IIC_SSE_MOV_S_RM, d>, Sched<[WriteLoad]>;
605 defm MOVSS : sse12_move<FR32, X86Movss, v4f32, f32mem, "movss",
606 SSEPackedSingle>, XS;
607 defm MOVSD : sse12_move<FR64, X86Movsd, v2f64, f64mem, "movsd",
608 SSEPackedDouble>, XD;
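// Each defm above produces both the VEX-encoded AVX forms (VMOVSSrr, VMOVSSmr,
// VMOVSDrr, VMOVSDmr, ...) and the legacy SSE forms (MOVSSrr, MOVSSmr, ...),
// per the V#NAME / NAME definitions in sse12_move.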
610 let canFoldAsLoad = 1, isReMaterializable = 1 in {
611 defm MOVSS : sse12_move_rm<FR32, f32mem, loadf32, "movss",
612 SSEPackedSingle>, XS;
614 let AddedComplexity = 20 in
615 defm MOVSD : sse12_move_rm<FR64, f64mem, loadf64, "movsd",
616 SSEPackedDouble>, XD;
620 let Predicates = [UseAVX] in {
621 let AddedComplexity = 20 in {
622 // MOVSSrm zeros the high parts of the register; represent this
623 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
624 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
625 (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
626 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
627 (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
628 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
629 (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
631 // MOVSDrm zeros the high parts of the register; represent this
632 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
633 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
634 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
635 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
636 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
637 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
638 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
639 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
640 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
641 def : Pat<(v2f64 (X86vzload addr:$src)),
642 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
644 // Represent the same patterns above but in the form they appear for
645 // 256-bit types.
646 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
647 (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
648 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
649 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
650 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
651 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
652 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
653 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
654 (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
656 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
657 (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
658 (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_xmm)>;
660 // Extract and store.
661 def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
663 (VMOVSSmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32))>;
664 def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
666 (VMOVSDmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64))>;
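// I.e. storing element 0 of a vector is emitted directly as a vmovss/vmovsd
// store of the corresponding FR32/FR64 view, with no separate extract step.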
668 // Shuffle with VMOVSS
669 def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
670 (VMOVSSrr (v4i32 VR128:$src1),
671 (COPY_TO_REGCLASS (v4i32 VR128:$src2), FR32))>;
672 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
673 (VMOVSSrr (v4f32 VR128:$src1),
674 (COPY_TO_REGCLASS (v4f32 VR128:$src2), FR32))>;
677 def : Pat<(v8i32 (X86Movss VR256:$src1, VR256:$src2)),
678 (SUBREG_TO_REG (i32 0),
679 (VMOVSSrr (EXTRACT_SUBREG (v8i32 VR256:$src1), sub_xmm),
680 (EXTRACT_SUBREG (v8i32 VR256:$src2), sub_xmm)),
682 def : Pat<(v8f32 (X86Movss VR256:$src1, VR256:$src2)),
683 (SUBREG_TO_REG (i32 0),
684 (VMOVSSrr (EXTRACT_SUBREG (v8f32 VR256:$src1), sub_xmm),
685 (EXTRACT_SUBREG (v8f32 VR256:$src2), sub_xmm)),
688 // Shuffle with VMOVSD
689 def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
690 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
691 def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
692 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
693 def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
694 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
695 def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
696 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
699 def : Pat<(v4i64 (X86Movsd VR256:$src1, VR256:$src2)),
700 (SUBREG_TO_REG (i32 0),
701 (VMOVSDrr (EXTRACT_SUBREG (v4i64 VR256:$src1), sub_xmm),
702 (EXTRACT_SUBREG (v4i64 VR256:$src2), sub_xmm)),
704 def : Pat<(v4f64 (X86Movsd VR256:$src1, VR256:$src2)),
705 (SUBREG_TO_REG (i32 0),
706 (VMOVSDrr (EXTRACT_SUBREG (v4f64 VR256:$src1), sub_xmm),
707 (EXTRACT_SUBREG (v4f64 VR256:$src2), sub_xmm)),
710 // FIXME: Instead of an X86Movlps there should be an X86Movsd here; the problem
711 // is during lowering, where it's not possible to recognize the fold because
712 // it has two uses through a bitcast. One use disappears at isel time and the
713 // fold opportunity reappears.
714 def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
715 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
716 def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
717 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
718 def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
719 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
720 def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
721 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
724 let Predicates = [UseSSE1] in {
725 let Predicates = [NoSSE41], AddedComplexity = 15 in {
726 // Move scalar to XMM zero-extended, zeroing a VR128 then do a
727 // MOVSS to the lower bits.
728 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
729 (MOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
730 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
731 (MOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
732 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
733 (MOVSSrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
736 let AddedComplexity = 20 in {
737 // MOVSSrm already zeros the high parts of the register.
738 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
739 (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
740 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
741 (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
742 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
743 (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
746 // Extract and store.
747 def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
749 (MOVSSmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR32))>;
751 // Shuffle with MOVSS
752 def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
753 (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
754 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
755 (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
758 let Predicates = [UseSSE2] in {
759 let Predicates = [NoSSE41], AddedComplexity = 15 in {
760 // Move scalar to XMM zero-extended, zeroing a VR128 then do a
761 // MOVSD to the lower bits.
762 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
763 (MOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
766 let AddedComplexity = 20 in {
767 // MOVSDrm already zeros the high parts of the register.
768 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
769 (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
770 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
771 (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
772 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
773 (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
774 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
775 (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
776 def : Pat<(v2f64 (X86vzload addr:$src)),
777 (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
780 // Extract and store.
781 def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
783 (MOVSDmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR64))>;
785 // Shuffle with MOVSD
786 def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
787 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
788 def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
789 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
790 def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
791 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
792 def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
793 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
795 // FIXME: Instead of an X86Movlps there should be an X86Movsd here; the problem
796 // is during lowering, where it's not possible to recognize the fold because
797 // it has two uses through a bitcast. One use disappears at isel time and the
798 // fold opportunity reappears.
799 def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
800 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
801 def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
802 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
803 def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
804 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
805 def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
806 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
809 //===----------------------------------------------------------------------===//
810 // SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
811 //===----------------------------------------------------------------------===//
813 multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
814 X86MemOperand x86memop, PatFrag ld_frag,
815 string asm, Domain d,
817 bit IsReMaterializable = 1> {
818 let hasSideEffects = 0 in
819 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
820 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], itins.rr, d>,
821 Sched<[WriteFShuffle]>;
822 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
823 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
824 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
825 [(set RC:$dst, (ld_frag addr:$src))], itins.rm, d>,
829 let Predicates = [HasAVX, NoVLX] in {
830 defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
831 "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
833 defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
834 "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
836 defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
837 "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
839 defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
840 "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
843 defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
844 "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
846 defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
847 "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
849 defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
850 "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
852 defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
853 "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
857 let Predicates = [UseSSE1] in {
858 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
859 "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
861 defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
862 "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
865 let Predicates = [UseSSE2] in {
866 defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
867 "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
869 defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
870 "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
874 let SchedRW = [WriteStore], Predicates = [HasAVX, NoVLX] in {
875 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
876 "movaps\t{$src, $dst|$dst, $src}",
877 [(alignedstore (v4f32 VR128:$src), addr:$dst)],
878 IIC_SSE_MOVA_P_MR>, VEX;
879 def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
880 "movapd\t{$src, $dst|$dst, $src}",
881 [(alignedstore (v2f64 VR128:$src), addr:$dst)],
882 IIC_SSE_MOVA_P_MR>, VEX;
883 def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
884 "movups\t{$src, $dst|$dst, $src}",
885 [(store (v4f32 VR128:$src), addr:$dst)],
886 IIC_SSE_MOVU_P_MR>, VEX;
887 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
888 "movupd\t{$src, $dst|$dst, $src}",
889 [(store (v2f64 VR128:$src), addr:$dst)],
890 IIC_SSE_MOVU_P_MR>, VEX;
891 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
892 "movaps\t{$src, $dst|$dst, $src}",
893 [(alignedstore256 (v8f32 VR256:$src), addr:$dst)],
894 IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
895 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
896 "movapd\t{$src, $dst|$dst, $src}",
897 [(alignedstore256 (v4f64 VR256:$src), addr:$dst)],
898 IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
899 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
900 "movups\t{$src, $dst|$dst, $src}",
901 [(store (v8f32 VR256:$src), addr:$dst)],
902 IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
903 def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
904 "movupd\t{$src, $dst|$dst, $src}",
905 [(store (v4f64 VR256:$src), addr:$dst)],
906 IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
910 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
911 SchedRW = [WriteFShuffle] in {
912 def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst),
914 "movaps\t{$src, $dst|$dst, $src}", [],
915 IIC_SSE_MOVA_P_RR>, VEX;
916 def VMOVAPDrr_REV : VPDI<0x29, MRMDestReg, (outs VR128:$dst),
918 "movapd\t{$src, $dst|$dst, $src}", [],
919 IIC_SSE_MOVA_P_RR>, VEX;
920 def VMOVUPSrr_REV : VPSI<0x11, MRMDestReg, (outs VR128:$dst),
922 "movups\t{$src, $dst|$dst, $src}", [],
923 IIC_SSE_MOVU_P_RR>, VEX;
924 def VMOVUPDrr_REV : VPDI<0x11, MRMDestReg, (outs VR128:$dst),
926 "movupd\t{$src, $dst|$dst, $src}", [],
927 IIC_SSE_MOVU_P_RR>, VEX;
928 def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst),
930 "movaps\t{$src, $dst|$dst, $src}", [],
931 IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
932 def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst),
934 "movapd\t{$src, $dst|$dst, $src}", [],
935 IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
936 def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst),
938 "movups\t{$src, $dst|$dst, $src}", [],
939 IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
940 def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst),
942 "movupd\t{$src, $dst|$dst, $src}", [],
943 IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
946 let Predicates = [HasAVX] in {
947 def : Pat<(v8i32 (X86vzmovl
948 (insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)))),
949 (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
950 def : Pat<(v4i64 (X86vzmovl
951 (insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)))),
952 (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
953 def : Pat<(v8f32 (X86vzmovl
954 (insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)))),
955 (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
956 def : Pat<(v4f64 (X86vzmovl
957 (insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)))),
958 (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
962 def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
963 (VMOVUPSYmr addr:$dst, VR256:$src)>;
964 def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
965 (VMOVUPDYmr addr:$dst, VR256:$src)>;
967 let SchedRW = [WriteStore] in {
968 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
969 "movaps\t{$src, $dst|$dst, $src}",
970 [(alignedstore (v4f32 VR128:$src), addr:$dst)],
972 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
973 "movapd\t{$src, $dst|$dst, $src}",
974 [(alignedstore (v2f64 VR128:$src), addr:$dst)],
976 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
977 "movups\t{$src, $dst|$dst, $src}",
978 [(store (v4f32 VR128:$src), addr:$dst)],
980 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
981 "movupd\t{$src, $dst|$dst, $src}",
982 [(store (v2f64 VR128:$src), addr:$dst)],
987 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
988 SchedRW = [WriteFShuffle] in {
989 def MOVAPSrr_REV : PSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
990 "movaps\t{$src, $dst|$dst, $src}", [],
992 def MOVAPDrr_REV : PDI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
993 "movapd\t{$src, $dst|$dst, $src}", [],
995 def MOVUPSrr_REV : PSI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
996 "movups\t{$src, $dst|$dst, $src}", [],
998 def MOVUPDrr_REV : PDI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
999 "movupd\t{$src, $dst|$dst, $src}", [],
1003 let Predicates = [HasAVX] in {
1004 def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
1005 (VMOVUPSmr addr:$dst, VR128:$src)>;
1006 def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
1007 (VMOVUPDmr addr:$dst, VR128:$src)>;
1010 let Predicates = [UseSSE1] in
1011 def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
1012 (MOVUPSmr addr:$dst, VR128:$src)>;
1013 let Predicates = [UseSSE2] in
1014 def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
1015 (MOVUPDmr addr:$dst, VR128:$src)>;
1017 // Use vmovaps/vmovups for AVX integer load/store.
1018 let Predicates = [HasAVX, NoVLX] in {
1019 // 128-bit load/store
1020 def : Pat<(alignedloadv2i64 addr:$src),
1021 (VMOVAPSrm addr:$src)>;
1022 def : Pat<(loadv2i64 addr:$src),
1023 (VMOVUPSrm addr:$src)>;
1025 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
1026 (VMOVAPSmr addr:$dst, VR128:$src)>;
1027 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
1028 (VMOVAPSmr addr:$dst, VR128:$src)>;
1029 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
1030 (VMOVAPSmr addr:$dst, VR128:$src)>;
1031 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
1032 (VMOVAPSmr addr:$dst, VR128:$src)>;
1033 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
1034 (VMOVUPSmr addr:$dst, VR128:$src)>;
1035 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
1036 (VMOVUPSmr addr:$dst, VR128:$src)>;
1037 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
1038 (VMOVUPSmr addr:$dst, VR128:$src)>;
1039 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
1040 (VMOVUPSmr addr:$dst, VR128:$src)>;
1042 // 256-bit load/store
1043 def : Pat<(alignedloadv4i64 addr:$src),
1044 (VMOVAPSYrm addr:$src)>;
1045 def : Pat<(loadv4i64 addr:$src),
1046 (VMOVUPSYrm addr:$src)>;
1047 def : Pat<(alignedstore256 (v4i64 VR256:$src), addr:$dst),
1048 (VMOVAPSYmr addr:$dst, VR256:$src)>;
1049 def : Pat<(alignedstore256 (v8i32 VR256:$src), addr:$dst),
1050 (VMOVAPSYmr addr:$dst, VR256:$src)>;
1051 def : Pat<(alignedstore256 (v16i16 VR256:$src), addr:$dst),
1052 (VMOVAPSYmr addr:$dst, VR256:$src)>;
1053 def : Pat<(alignedstore256 (v32i8 VR256:$src), addr:$dst),
1054 (VMOVAPSYmr addr:$dst, VR256:$src)>;
1055 def : Pat<(store (v4i64 VR256:$src), addr:$dst),
1056 (VMOVUPSYmr addr:$dst, VR256:$src)>;
1057 def : Pat<(store (v8i32 VR256:$src), addr:$dst),
1058 (VMOVUPSYmr addr:$dst, VR256:$src)>;
1059 def : Pat<(store (v16i16 VR256:$src), addr:$dst),
1060 (VMOVUPSYmr addr:$dst, VR256:$src)>;
1061 def : Pat<(store (v32i8 VR256:$src), addr:$dst),
1062 (VMOVUPSYmr addr:$dst, VR256:$src)>;
1064 // Special patterns for storing subvector extracts of the lower 128 bits.
1065 // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr.
1066 def : Pat<(alignedstore (v2f64 (extract_subvector
1067 (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
1068 (VMOVAPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1069 def : Pat<(alignedstore (v4f32 (extract_subvector
1070 (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
1071 (VMOVAPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1072 def : Pat<(alignedstore (v2i64 (extract_subvector
1073 (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
1074 (VMOVAPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1075 def : Pat<(alignedstore (v4i32 (extract_subvector
1076 (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
1077 (VMOVAPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1078 def : Pat<(alignedstore (v8i16 (extract_subvector
1079 (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
1080 (VMOVAPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1081 def : Pat<(alignedstore (v16i8 (extract_subvector
1082 (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
1083 (VMOVAPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1085 def : Pat<(store (v2f64 (extract_subvector
1086 (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
1087 (VMOVUPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1088 def : Pat<(store (v4f32 (extract_subvector
1089 (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
1090 (VMOVUPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1091 def : Pat<(store (v2i64 (extract_subvector
1092 (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
1093 (VMOVUPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1094 def : Pat<(store (v4i32 (extract_subvector
1095 (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
1096 (VMOVUPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1097 def : Pat<(store (v8i16 (extract_subvector
1098 (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
1099 (VMOVUPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1100 def : Pat<(store (v16i8 (extract_subvector
1101 (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
1102 (VMOVUPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1105 // Use movaps / movups for SSE integer load / store (one byte shorter).
1106 // The instructions selected below are then converted to MOVDQA/MOVDQU
1107 // during the SSE domain pass.
1108 let Predicates = [UseSSE1] in {
1109 def : Pat<(alignedloadv2i64 addr:$src),
1110 (MOVAPSrm addr:$src)>;
1111 def : Pat<(loadv2i64 addr:$src),
1112 (MOVUPSrm addr:$src)>;
1114 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
1115 (MOVAPSmr addr:$dst, VR128:$src)>;
1116 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
1117 (MOVAPSmr addr:$dst, VR128:$src)>;
1118 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
1119 (MOVAPSmr addr:$dst, VR128:$src)>;
1120 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
1121 (MOVAPSmr addr:$dst, VR128:$src)>;
1122 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
1123 (MOVUPSmr addr:$dst, VR128:$src)>;
1124 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
1125 (MOVUPSmr addr:$dst, VR128:$src)>;
1126 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
1127 (MOVUPSmr addr:$dst, VR128:$src)>;
1128 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
1129 (MOVUPSmr addr:$dst, VR128:$src)>;
1132 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1133 // bits are disregarded. FIXME: Set encoding to pseudo!
1134 let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
1135 let isCodeGenOnly = 1 in {
1136 def FsVMOVAPSrm : VPSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1137 "movaps\t{$src, $dst|$dst, $src}",
1138 [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
1139 IIC_SSE_MOVA_P_RM>, VEX;
1140 def FsVMOVAPDrm : VPDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1141 "movapd\t{$src, $dst|$dst, $src}",
1142 [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
1143 IIC_SSE_MOVA_P_RM>, VEX;
1144 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1145 "movaps\t{$src, $dst|$dst, $src}",
1146 [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
1148 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1149 "movapd\t{$src, $dst|$dst, $src}",
1150 [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
1155 //===----------------------------------------------------------------------===//
1156 // SSE 1 & 2 - Move Low packed FP Instructions
1157 //===----------------------------------------------------------------------===//
1159 multiclass sse12_mov_hilo_packed_base<bits<8>opc, SDNode psnode, SDNode pdnode,
1160 string base_opc, string asm_opr,
1161 InstrItinClass itin> {
1162 def PSrm : PI<opc, MRMSrcMem,
1163 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
1164 !strconcat(base_opc, "s", asm_opr),
1166 (psnode VR128:$src1,
1167 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
1168 itin, SSEPackedSingle>, PS,
1169 Sched<[WriteFShuffleLd, ReadAfterLd]>;
1171 def PDrm : PI<opc, MRMSrcMem,
1172 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
1173 !strconcat(base_opc, "d", asm_opr),
1174 [(set VR128:$dst, (v2f64 (pdnode VR128:$src1,
1175 (scalar_to_vector (loadf64 addr:$src2)))))],
1176 itin, SSEPackedDouble>, PD,
1177 Sched<[WriteFShuffleLd, ReadAfterLd]>;
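// Sketch of how the wrappers below are used: sse12_mov_hilo_packed is
// instantiated as MOVL (0x12, movlps/movlpd) and MOVH (0x16, movhps/movhpd),
// so each defm yields a VEX pair (e.g. VMOVLPSrm/VMOVLPDrm) plus a legacy,
// tied-operand pair (MOVLPSrm/MOVLPDrm) from the PSrm/PDrm definitions above.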
1181 multiclass sse12_mov_hilo_packed<bits<8>opc, SDNode psnode, SDNode pdnode,
1182 string base_opc, InstrItinClass itin> {
1183 defm V#NAME : sse12_mov_hilo_packed_base<opc, psnode, pdnode, base_opc,
1184 "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1187 let Constraints = "$src1 = $dst" in
1188 defm NAME : sse12_mov_hilo_packed_base<opc, psnode, pdnode, base_opc,
1189 "\t{$src2, $dst|$dst, $src2}",
1193 let AddedComplexity = 20 in {
1194 defm MOVL : sse12_mov_hilo_packed<0x12, X86Movlps, X86Movlpd, "movlp",
1198 let SchedRW = [WriteStore] in {
1199 def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1200 "movlps\t{$src, $dst|$dst, $src}",
1201 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
1202 (iPTR 0))), addr:$dst)],
1203 IIC_SSE_MOV_LH>, VEX;
1204 def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1205 "movlpd\t{$src, $dst|$dst, $src}",
1206 [(store (f64 (vector_extract (v2f64 VR128:$src),
1207 (iPTR 0))), addr:$dst)],
1208 IIC_SSE_MOV_LH>, VEX;
1209 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1210 "movlps\t{$src, $dst|$dst, $src}",
1211 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
1212 (iPTR 0))), addr:$dst)],
1214 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1215 "movlpd\t{$src, $dst|$dst, $src}",
1216 [(store (f64 (vector_extract (v2f64 VR128:$src),
1217 (iPTR 0))), addr:$dst)],
1221 let Predicates = [HasAVX] in {
1222 // Shuffle with VMOVLPS
1223 def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
1224 (VMOVLPSrm VR128:$src1, addr:$src2)>;
1225 def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
1226 (VMOVLPSrm VR128:$src1, addr:$src2)>;
1228 // Shuffle with VMOVLPD
1229 def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
1230 (VMOVLPDrm VR128:$src1, addr:$src2)>;
1231 def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
1232 (VMOVLPDrm VR128:$src1, addr:$src2)>;
1233 def : Pat<(v2f64 (X86Movsd VR128:$src1,
1234 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
1235 (VMOVLPDrm VR128:$src1, addr:$src2)>;
1238 def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
1240 (VMOVLPSmr addr:$src1, VR128:$src2)>;
1241 def : Pat<(store (v4i32 (X86Movlps
1242 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
1243 (VMOVLPSmr addr:$src1, VR128:$src2)>;
1244 def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
1246 (VMOVLPDmr addr:$src1, VR128:$src2)>;
1247 def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
1249 (VMOVLPDmr addr:$src1, VR128:$src2)>;
1252 let Predicates = [UseSSE1] in {
1253 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
1254 def : Pat<(store (i64 (vector_extract (bc_v2i64 (v4f32 VR128:$src2)),
1255 (iPTR 0))), addr:$src1),
1256 (MOVLPSmr addr:$src1, VR128:$src2)>;
1258 // Shuffle with MOVLPS
1259 def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
1260 (MOVLPSrm VR128:$src1, addr:$src2)>;
1261 def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
1262 (MOVLPSrm VR128:$src1, addr:$src2)>;
1263 def : Pat<(X86Movlps VR128:$src1,
1264 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
1265 (MOVLPSrm VR128:$src1, addr:$src2)>;
1268 def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
1270 (MOVLPSmr addr:$src1, VR128:$src2)>;
1271 def : Pat<(store (v4i32 (X86Movlps
1272 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
1274 (MOVLPSmr addr:$src1, VR128:$src2)>;
1277 let Predicates = [UseSSE2] in {
1278 // Shuffle with MOVLPD
1279 def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
1280 (MOVLPDrm VR128:$src1, addr:$src2)>;
1281 def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
1282 (MOVLPDrm VR128:$src1, addr:$src2)>;
1283 def : Pat<(v2f64 (X86Movsd VR128:$src1,
1284 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
1285 (MOVLPDrm VR128:$src1, addr:$src2)>;
1288 def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
1290 (MOVLPDmr addr:$src1, VR128:$src2)>;
1291 def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
1293 (MOVLPDmr addr:$src1, VR128:$src2)>;
1296 //===----------------------------------------------------------------------===//
1297 // SSE 1 & 2 - Move Hi packed FP Instructions
1298 //===----------------------------------------------------------------------===//
1300 let AddedComplexity = 20 in {
1301 defm MOVH : sse12_mov_hilo_packed<0x16, X86Movlhps, X86Movlhpd, "movhp",
1305 let SchedRW = [WriteStore] in {
1306 // v2f64 extract element 1 is always custom lowered to unpack high to low
1307 // and extract element 0 so the non-store version isn't too horrible.
1308 def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1309 "movhps\t{$src, $dst|$dst, $src}",
1310 [(store (f64 (vector_extract
1311 (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
1312 (bc_v2f64 (v4f32 VR128:$src))),
1313 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
1314 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1315 "movhpd\t{$src, $dst|$dst, $src}",
1316 [(store (f64 (vector_extract
1317 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
1318 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
1319 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1320 "movhps\t{$src, $dst|$dst, $src}",
1321 [(store (f64 (vector_extract
1322 (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
1323 (bc_v2f64 (v4f32 VR128:$src))),
1324 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
1325 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1326 "movhpd\t{$src, $dst|$dst, $src}",
1327 [(store (f64 (vector_extract
1328 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
1329 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
1332 let Predicates = [HasAVX] in {
1334 def : Pat<(X86Movlhps VR128:$src1,
1335 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
1336 (VMOVHPSrm VR128:$src1, addr:$src2)>;
1337 def : Pat<(X86Movlhps VR128:$src1,
1338 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
1339 (VMOVHPSrm VR128:$src1, addr:$src2)>;
1343 // FIXME: Instead of X86Unpckl, there should be an X86Movlhpd here; the problem
1344 // is during lowering, where it's not possible to recognize the load fold
1345 // because it has two uses through a bitcast. One use disappears at isel time
1346 // and the fold opportunity reappears.
1347 def : Pat<(v2f64 (X86Unpckl VR128:$src1,
1348 (scalar_to_vector (loadf64 addr:$src2)))),
1349 (VMOVHPDrm VR128:$src1, addr:$src2)>;
1350 // Also handle an i64 load because that may get selected as a faster way to
1351 // load the f64 data.
1352 def : Pat<(v2f64 (X86Unpckl VR128:$src1,
1353 (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
1354 (VMOVHPDrm VR128:$src1, addr:$src2)>;
1356 def : Pat<(store (f64 (vector_extract
1357 (v2f64 (X86VPermilpi VR128:$src, (i8 1))),
1358 (iPTR 0))), addr:$dst),
1359 (VMOVHPDmr addr:$dst, VR128:$src)>;
1362 let Predicates = [UseSSE1] in {
1364 def : Pat<(X86Movlhps VR128:$src1,
1365 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
1366 (MOVHPSrm VR128:$src1, addr:$src2)>;
1367 def : Pat<(X86Movlhps VR128:$src1,
1368 (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
1369 (MOVHPSrm VR128:$src1, addr:$src2)>;
1372 let Predicates = [UseSSE2] in {
1375 // FIXME: Instead of X86Unpckl, there should be an X86Movlhpd here; the problem
1376 // is during lowering, where it's not possible to recognize the load fold
1377 // because it has two uses through a bitcast. One use disappears at isel time
1378 // and the fold opportunity reappears.
1379 def : Pat<(v2f64 (X86Unpckl VR128:$src1,
1380 (scalar_to_vector (loadf64 addr:$src2)))),
1381 (MOVHPDrm VR128:$src1, addr:$src2)>;
1382 // Also handle an i64 load because that may get selected as a faster way to
1383 // load the f64 data.
1384 def : Pat<(v2f64 (X86Unpckl VR128:$src1,
1385 (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
1386 (MOVHPDrm VR128:$src1, addr:$src2)>;
1388 def : Pat<(store (f64 (vector_extract
1389 (v2f64 (X86Shufp VR128:$src, VR128:$src, (i8 1))),
1390 (iPTR 0))), addr:$dst),
1391 (MOVHPDmr addr:$dst, VR128:$src)>;
1394 //===----------------------------------------------------------------------===//
1395 // SSE 1 & 2 - Move Low to High and High to Low packed FP Instructions
1396 //===----------------------------------------------------------------------===//
1398 let AddedComplexity = 20, Predicates = [UseAVX] in {
1399 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
1400 (ins VR128:$src1, VR128:$src2),
1401 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1403 (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
1405 VEX_4V, Sched<[WriteFShuffle]>;
1406 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
1407 (ins VR128:$src1, VR128:$src2),
1408 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1409 [(set VR128:$dst,
1410 (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
1411 IIC_SSE_MOV_LH>,
1412 VEX_4V, Sched<[WriteFShuffle]>;
1414 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
1415 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
1416 (ins VR128:$src1, VR128:$src2),
1417 "movlhps\t{$src2, $dst|$dst, $src2}",
1418 [(set VR128:$dst,
1419 (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
1420 IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
1421 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
1422 (ins VR128:$src1, VR128:$src2),
1423 "movhlps\t{$src2, $dst|$dst, $src2}",
1424 [(set VR128:$dst,
1425 (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
1426 IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
1429 let Predicates = [UseAVX] in {
1431 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
1432 (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
1433 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
1434 (VMOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
1437 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
1438 (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
1441 let Predicates = [UseSSE1] in {
1443 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
1444 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
1445 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
1446 (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
1449 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
1450 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
1453 //===----------------------------------------------------------------------===//
1454 // SSE 1 & 2 - Conversion Instructions
1455 //===----------------------------------------------------------------------===//
1457 def SSE_CVT_PD : OpndItins<
1458 IIC_SSE_CVT_PD_RR, IIC_SSE_CVT_PD_RM
1461 let Sched = WriteCvtI2F in
1462 def SSE_CVT_PS : OpndItins<
1463 IIC_SSE_CVT_PS_RR, IIC_SSE_CVT_PS_RM
1466 let Sched = WriteCvtI2F in
1467 def SSE_CVT_Scalar : OpndItins<
1468 IIC_SSE_CVT_Scalar_RR, IIC_SSE_CVT_Scalar_RM
1471 let Sched = WriteCvtF2I in
1472 def SSE_CVT_SS2SI_32 : OpndItins<
1473 IIC_SSE_CVT_SS2SI32_RR, IIC_SSE_CVT_SS2SI32_RM
1476 let Sched = WriteCvtF2I in
1477 def SSE_CVT_SS2SI_64 : OpndItins<
1478 IIC_SSE_CVT_SS2SI64_RR, IIC_SSE_CVT_SS2SI64_RM
1481 let Sched = WriteCvtF2I in
1482 def SSE_CVT_SD2SI : OpndItins<
1483 IIC_SSE_CVT_SD2SI_RR, IIC_SSE_CVT_SD2SI_RM
1486 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1487 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
1488 string asm, OpndItins itins> {
1489 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
1490 [(set DstRC:$dst, (OpNode SrcRC:$src))],
1491 itins.rr>, Sched<[itins.Sched]>;
1492 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
1493 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))],
1494 itins.rm>, Sched<[itins.Sched.Folded]>;
1497 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1498 X86MemOperand x86memop, string asm, Domain d,
1499 OpndItins itins> {
1500 let hasSideEffects = 0 in {
1501 def rr : I<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
1502 [], itins.rr, d>, Sched<[itins.Sched]>;
1504 def rm : I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
1505 [], itins.rm, d>, Sched<[itins.Sched.Folded]>;
1509 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1510 X86MemOperand x86memop, string asm> {
1511 let hasSideEffects = 0, Predicates = [UseAVX] in {
1512 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
1513 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
1514 Sched<[WriteCvtI2F]>;
1516 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1517 (ins DstRC:$src1, x86memop:$src),
1518 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
1519 Sched<[WriteCvtI2FLd, ReadAfterLd]>;
1520 } // hasSideEffects = 0
1523 let Predicates = [UseAVX] in {
1524 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
1525 "cvttss2si\t{$src, $dst|$dst, $src}",
1526 SSE_CVT_SS2SI_32>,
1527 XS, VEX, VEX_LIG;
1528 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
1529 "cvttss2si\t{$src, $dst|$dst, $src}",
1530 SSE_CVT_SS2SI_64>,
1531 XS, VEX, VEX_W, VEX_LIG;
1532 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
1533 "cvttsd2si\t{$src, $dst|$dst, $src}",
1534 SSE_CVT_SD2SI>,
1535 XD, VEX, VEX_LIG;
1536 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
1537 "cvttsd2si\t{$src, $dst|$dst, $src}",
1538 SSE_CVT_SD2SI>,
1539 XD, VEX, VEX_W, VEX_LIG;
1541 def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
1542 (VCVTTSS2SIrr GR32:$dst, FR32:$src), 0>;
1543 def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
1544 (VCVTTSS2SIrm GR32:$dst, f32mem:$src), 0>;
1545 def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
1546 (VCVTTSD2SIrr GR32:$dst, FR64:$src), 0>;
1547 def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
1548 (VCVTTSD2SIrm GR32:$dst, f64mem:$src), 0>;
1549 def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
1550 (VCVTTSS2SI64rr GR64:$dst, FR32:$src), 0>;
1551 def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
1552 (VCVTTSS2SI64rm GR64:$dst, f32mem:$src), 0>;
1553 def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
1554 (VCVTTSD2SI64rr GR64:$dst, FR64:$src), 0>;
1555 def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
1556 (VCVTTSD2SI64rm GR64:$dst, f64mem:$src), 0>;
1558 // The assembler can recognize rr 64-bit instructions by seeing a 64-bit
1559 // register operand, but the same isn't true when only memory operands are
1560 // used. Provide explicit "l" and "q" assembly forms to address this
1561 // where appropriate.
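// For example, "cvtsi2ss (%rax), %xmm0" alone does not say whether the memory
// operand is 32 or 64 bits wide; the "cvtsi2ssl"/"cvtsi2ssq" mnemonics make
// the width explicit. (Illustrative example only.)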
1562 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss{l}">,
1563 XS, VEX_4V, VEX_LIG;
1564 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">,
1565 XS, VEX_4V, VEX_W, VEX_LIG;
1566 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">,
1567 XD, VEX_4V, VEX_LIG;
1568 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">,
1569 XD, VEX_4V, VEX_W, VEX_LIG;
1571 let Predicates = [UseAVX] in {
1572 def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1573 (VCVTSI2SSrm FR64:$dst, FR64:$src1, i32mem:$src), 0>;
1574 def : InstAlias<"vcvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1575 (VCVTSI2SDrm FR64:$dst, FR64:$src1, i32mem:$src), 0>;
1577 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
1578 (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
1579 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
1580 (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
1581 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
1582 (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
1583 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
1584 (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;
1586 def : Pat<(f32 (sint_to_fp GR32:$src)),
1587 (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
1588 def : Pat<(f32 (sint_to_fp GR64:$src)),
1589 (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
1590 def : Pat<(f64 (sint_to_fp GR32:$src)),
1591 (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
1592 def : Pat<(f64 (sint_to_fp GR64:$src)),
1593 (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
1596 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
1597 "cvttss2si\t{$src, $dst|$dst, $src}",
1598 SSE_CVT_SS2SI_32>, XS;
1599 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
1600 "cvttss2si\t{$src, $dst|$dst, $src}",
1601 SSE_CVT_SS2SI_64>, XS, REX_W;
1602 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
1603 "cvttsd2si\t{$src, $dst|$dst, $src}",
1604 SSE_CVT_SD2SI>, XD;
1605 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
1606 "cvttsd2si\t{$src, $dst|$dst, $src}",
1607 SSE_CVT_SD2SI>, XD, REX_W;
1608 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
1609 "cvtsi2ss{l}\t{$src, $dst|$dst, $src}",
1610 SSE_CVT_Scalar>, XS;
1611 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
1612 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1613 SSE_CVT_Scalar>, XS, REX_W;
1614 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
1615 "cvtsi2sd{l}\t{$src, $dst|$dst, $src}",
1616 SSE_CVT_Scalar>, XD;
1617 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
1618 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1619 SSE_CVT_Scalar>, XD, REX_W;
1621 def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
1622 (CVTTSS2SIrr GR32:$dst, FR32:$src), 0>;
1623 def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
1624 (CVTTSS2SIrm GR32:$dst, f32mem:$src), 0>;
1625 def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
1626 (CVTTSD2SIrr GR32:$dst, FR64:$src), 0>;
1627 def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
1628 (CVTTSD2SIrm GR32:$dst, f64mem:$src), 0>;
1629 def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
1630 (CVTTSS2SI64rr GR64:$dst, FR32:$src), 0>;
1631 def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
1632 (CVTTSS2SI64rm GR64:$dst, f32mem:$src), 0>;
1633 def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1634 (CVTTSD2SI64rr GR64:$dst, FR64:$src), 0>;
1635 def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1636 (CVTTSD2SI64rm GR64:$dst, f64mem:$src), 0>;
1638 def : InstAlias<"cvtsi2ss\t{$src, $dst|$dst, $src}",
1639 (CVTSI2SSrm FR64:$dst, i32mem:$src), 0>;
1640 def : InstAlias<"cvtsi2sd\t{$src, $dst|$dst, $src}",
1641 (CVTSI2SDrm FR64:$dst, i32mem:$src), 0>;
1643 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
1644 // and/or XMM operand(s).
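// Note: unlike the scalar patterns above, these intrinsics (e.g.
// int_x86_sse2_cvtsd2si) take the whole 128-bit vector and convert only its
// lowest element, so the definitions below use VR128 sources rather than
// FR32/FR64.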
1646 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1647 Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
1648 string asm, OpndItins itins> {
1649 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
1650 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1651 [(set DstRC:$dst, (Int SrcRC:$src))], itins.rr>,
1652 Sched<[itins.Sched]>;
1653 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
1654 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1655 [(set DstRC:$dst, (Int mem_cpat:$src))], itins.rm>,
1656 Sched<[itins.Sched.Folded]>;
1659 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
1660 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
1661 PatFrag ld_frag, string asm, OpndItins itins,
1662 bit Is2Addr = 1> {
1663 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
1664 !if(Is2Addr,
1665 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1666 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1667 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))],
1668 itins.rr>, Sched<[itins.Sched]>;
1669 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1670 (ins DstRC:$src1, x86memop:$src2),
1671 !if(Is2Addr,
1672 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1673 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1674 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))],
1675 itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
1678 let Predicates = [UseAVX] in {
1679 defm VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32,
1680 int_x86_sse2_cvtsd2si, sdmem, sse_load_f64, "cvtsd2si",
1681 SSE_CVT_SD2SI>, XD, VEX, VEX_LIG;
1682 defm VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
1683 int_x86_sse2_cvtsd2si64, sdmem, sse_load_f64, "cvtsd2si",
1684 SSE_CVT_SD2SI>, XD, VEX, VEX_W, VEX_LIG;
1686 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
1687 sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD;
1688 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
1689 sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD, REX_W;
1692 let isCodeGenOnly = 1 in {
1693 let Predicates = [UseAVX] in {
1694 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1695 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
1696 SSE_CVT_Scalar, 0>, XS, VEX_4V;
1697 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1698 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
1699 SSE_CVT_Scalar, 0>, XS, VEX_4V,
1701 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1702 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
1703 SSE_CVT_Scalar, 0>, XD, VEX_4V;
1704 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1705 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
1706 SSE_CVT_Scalar, 0>, XD,
1709 let Constraints = "$src1 = $dst" in {
1710 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1711 int_x86_sse_cvtsi2ss, i32mem, loadi32,
1712 "cvtsi2ss{l}", SSE_CVT_Scalar>, XS;
1713 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1714 int_x86_sse_cvtsi642ss, i64mem, loadi64,
1715 "cvtsi2ss{q}", SSE_CVT_Scalar>, XS, REX_W;
1716 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1717 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
1718 "cvtsi2sd{l}", SSE_CVT_Scalar>, XD;
1719 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1720 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
1721 "cvtsi2sd{q}", SSE_CVT_Scalar>, XD, REX_W;
1723 } // isCodeGenOnly = 1
1727 // Aliases for intrinsics
1728 let isCodeGenOnly = 1 in {
1729 let Predicates = [UseAVX] in {
1730 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1731 ssmem, sse_load_f32, "cvttss2si",
1732 SSE_CVT_SS2SI_32>, XS, VEX;
1733 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1734 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
1735 "cvttss2si", SSE_CVT_SS2SI_64>,
1737 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
1738 sdmem, sse_load_f64, "cvttsd2si",
1739 SSE_CVT_SD2SI>, XD, VEX;
1740 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1741 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
1742 "cvttsd2si", SSE_CVT_SD2SI>,
1745 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1746 ssmem, sse_load_f32, "cvttss2si",
1747 SSE_CVT_SS2SI_32>, XS;
1748 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1749 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
1750 "cvttss2si", SSE_CVT_SS2SI_64>, XS, REX_W;
1751 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
1752 sdmem, sse_load_f64, "cvttsd2si",
1753 SSE_CVT_SD2SI>, XD;
1754 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1755 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
1756 "cvttsd2si", SSE_CVT_SD2SI>, XD, REX_W;
1757 } // isCodeGenOnly = 1
1759 let Predicates = [UseAVX] in {
1760 defm VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
1761 ssmem, sse_load_f32, "cvtss2si",
1762 SSE_CVT_SS2SI_32>, XS, VEX, VEX_LIG;
1763 defm VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
1764 ssmem, sse_load_f32, "cvtss2si",
1765 SSE_CVT_SS2SI_64>, XS, VEX, VEX_W, VEX_LIG;
1767 defm CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
1768 ssmem, sse_load_f32, "cvtss2si",
1769 SSE_CVT_SS2SI_32>, XS;
1770 defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
1771 ssmem, sse_load_f32, "cvtss2si",
1772 SSE_CVT_SS2SI_64>, XS, REX_W;
1774 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
1775 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1776 SSEPackedSingle, SSE_CVT_PS>,
1777 PS, VEX, Requires<[HasAVX]>;
1778 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, i256mem,
1779 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1780 SSEPackedSingle, SSE_CVT_PS>,
1781 PS, VEX, VEX_L, Requires<[HasAVX]>;
1783 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
1784 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1785 SSEPackedSingle, SSE_CVT_PS>,
1786 PS, Requires<[UseSSE2]>;
1788 let Predicates = [UseAVX] in {
1789 def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
1790 (VCVTSS2SIrr GR32:$dst, VR128:$src), 0>;
1791 def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
1792 (VCVTSS2SIrm GR32:$dst, ssmem:$src), 0>;
1793 def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
1794 (VCVTSD2SIrr GR32:$dst, VR128:$src), 0>;
1795 def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
1796 (VCVTSD2SIrm GR32:$dst, sdmem:$src), 0>;
1797 def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
1798 (VCVTSS2SI64rr GR64:$dst, VR128:$src), 0>;
1799 def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
1800 (VCVTSS2SI64rm GR64:$dst, ssmem:$src), 0>;
1801 def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
1802 (VCVTSD2SI64rr GR64:$dst, VR128:$src), 0>;
1803 def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
1804 (VCVTSD2SI64rm GR64:$dst, sdmem:$src), 0>;
1807 def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
1808 (CVTSS2SIrr GR32:$dst, VR128:$src), 0>;
1809 def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
1810 (CVTSS2SIrm GR32:$dst, ssmem:$src), 0>;
1811 def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
1812 (CVTSD2SIrr GR32:$dst, VR128:$src), 0>;
1813 def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
1814 (CVTSD2SIrm GR32:$dst, sdmem:$src), 0>;
1815 def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
1816 (CVTSS2SI64rr GR64:$dst, VR128:$src), 0>;
1817 def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
1818 (CVTSS2SI64rm GR64:$dst, ssmem:$src), 0>;
1819 def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1820 (CVTSD2SI64rr GR64:$dst, VR128:$src), 0>;
1821 def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1822 (CVTSD2SI64rm GR64:$dst, sdmem:$src)>;
1826 // Convert scalar double to scalar single
1827 let hasSideEffects = 0, Predicates = [UseAVX] in {
1828 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
1829 (ins FR64:$src1, FR64:$src2),
1830 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
1831 IIC_SSE_CVT_Scalar_RR>, VEX_4V, VEX_LIG,
1832 Sched<[WriteCvtF2F]>;
1834 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
1835 (ins FR64:$src1, f64mem:$src2),
1836 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1837 [], IIC_SSE_CVT_Scalar_RM>,
1838 XD, Requires<[HasAVX, OptForSize]>, VEX_4V, VEX_LIG,
1839 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1842 def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
1843 Requires<[UseAVX]>;
1845 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
1846 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1847 [(set FR32:$dst, (fround FR64:$src))],
1848 IIC_SSE_CVT_Scalar_RR>, Sched<[WriteCvtF2F]>;
1849 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
1850 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1851 [(set FR32:$dst, (fround (loadf64 addr:$src)))],
1852 IIC_SSE_CVT_Scalar_RM>,
1853 XD,
1854 Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
1856 let isCodeGenOnly = 1 in {
1857 def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg,
1858 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1859 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1860 [(set VR128:$dst,
1861 (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
1862 IIC_SSE_CVT_Scalar_RR>, XD, VEX_4V, Requires<[UseAVX]>,
1863 Sched<[WriteCvtF2F]>;
1864 def Int_VCVTSD2SSrm: I<0x5A, MRMSrcReg,
1865 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1866 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1867 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1868 VR128:$src1, sse_load_f64:$src2))],
1869 IIC_SSE_CVT_Scalar_RM>, XD, VEX_4V, Requires<[UseAVX]>,
1870 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1872 let Constraints = "$src1 = $dst" in {
1873 def Int_CVTSD2SSrr: I<0x5A, MRMSrcReg,
1874 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1875 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1876 [(set VR128:$dst,
1877 (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
1878 IIC_SSE_CVT_Scalar_RR>, XD, Requires<[UseSSE2]>,
1879 Sched<[WriteCvtF2F]>;
1880 def Int_CVTSD2SSrm: I<0x5A, MRMSrcReg,
1881 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1882 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1883 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1884 VR128:$src1, sse_load_f64:$src2))],
1885 IIC_SSE_CVT_Scalar_RM>, XD, Requires<[UseSSE2]>,
1886 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1888 } // isCodeGenOnly = 1
1890 // Convert scalar single to scalar double
1891 // SSE2 instructions with XS prefix
1892 let hasSideEffects = 0, Predicates = [UseAVX] in {
1893 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
1894 (ins FR32:$src1, FR32:$src2),
1895 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1896 [], IIC_SSE_CVT_Scalar_RR>,
1897 XS, Requires<[HasAVX]>, VEX_4V, VEX_LIG,
1898 Sched<[WriteCvtF2F]>;
1900 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
1901 (ins FR32:$src1, f32mem:$src2),
1902 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1903 [], IIC_SSE_CVT_Scalar_RM>,
1904 XS, VEX_4V, VEX_LIG, Requires<[HasAVX, OptForSize]>,
1905 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1908 def : Pat<(f64 (fextend FR32:$src)),
1909 (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[UseAVX]>;
1910 def : Pat<(fextend (loadf32 addr:$src)),
1911 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[UseAVX]>;
1913 def : Pat<(extloadf32 addr:$src),
1914 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>,
1915 Requires<[UseAVX, OptForSize]>;
1916 def : Pat<(extloadf32 addr:$src),
1917 (VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (VMOVSSrm addr:$src))>,
1918 Requires<[UseAVX, OptForSpeed]>;
1920 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
1921 "cvtss2sd\t{$src, $dst|$dst, $src}",
1922 [(set FR64:$dst, (fextend FR32:$src))],
1923 IIC_SSE_CVT_Scalar_RR>, XS,
1924 Requires<[UseSSE2]>, Sched<[WriteCvtF2F]>;
1925 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
1926 "cvtss2sd\t{$src, $dst|$dst, $src}",
1927 [(set FR64:$dst, (extloadf32 addr:$src))],
1928 IIC_SSE_CVT_Scalar_RM>, XS,
1929 Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
1931 // extload f32 -> f64. This matches load+fextend because we have a hack in
1932 // the isel (PreprocessForFPConvert) that can introduce loads after dag
1933 // combine.
1934 // Since these loads aren't folded into the fextend, we have to match it
1935 // explicitly here.
1936 def : Pat<(fextend (loadf32 addr:$src)),
1937 (CVTSS2SDrm addr:$src)>, Requires<[UseSSE2]>;
1938 def : Pat<(extloadf32 addr:$src),
1939 (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[UseSSE2, OptForSpeed]>;
1941 let isCodeGenOnly = 1 in {
1942 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
1943 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1944 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1945 [(set VR128:$dst,
1946 (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
1947 IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, Requires<[UseAVX]>,
1948 Sched<[WriteCvtF2F]>;
1949 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
1950 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1951 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1952 [(set VR128:$dst,
1953 (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
1954 IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, Requires<[UseAVX]>,
1955 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1956 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
1957 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
1958 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1959 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1960 [(set VR128:$dst,
1961 (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
1962 IIC_SSE_CVT_Scalar_RR>, XS, Requires<[UseSSE2]>,
1963 Sched<[WriteCvtF2F]>;
1964 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
1965 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1966 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1967 [(set VR128:$dst,
1968 (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
1969 IIC_SSE_CVT_Scalar_RM>, XS, Requires<[UseSSE2]>,
1970 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1972 } // isCodeGenOnly = 1
1974 // Convert packed single/double fp to doubleword
1975 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1976 "cvtps2dq\t{$src, $dst|$dst, $src}",
1977 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
1978 IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
1979 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1980 "cvtps2dq\t{$src, $dst|$dst, $src}",
1981 [(set VR128:$dst,
1982 (int_x86_sse2_cvtps2dq (loadv4f32 addr:$src)))],
1983 IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
1984 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1985 "cvtps2dq\t{$src, $dst|$dst, $src}",
1986 [(set VR256:$dst,
1987 (int_x86_avx_cvt_ps2dq_256 VR256:$src))],
1988 IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
1989 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1990 "cvtps2dq\t{$src, $dst|$dst, $src}",
1991 [(set VR256:$dst,
1992 (int_x86_avx_cvt_ps2dq_256 (loadv8f32 addr:$src)))],
1993 IIC_SSE_CVT_PS_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
1994 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1995 "cvtps2dq\t{$src, $dst|$dst, $src}",
1996 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
1997 IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
1998 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1999 "cvtps2dq\t{$src, $dst|$dst, $src}",
2000 [(set VR128:$dst,
2001 (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))],
2002 IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
2005 // Convert Packed Double FP to Packed DW Integers
2006 let Predicates = [HasAVX] in {
2007 // The assembler can recognize rr 256-bit instructions by seeing a ymm
2008 // register, but the same isn't true when using memory operands instead.
2009 // Provide other assembly rr and rm forms to address this explicitly.
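// For example, with a memory source "vcvtpd2dq (%rax), %xmm0" does not say
// whether a 128-bit or 256-bit vector is loaded, so the "x"/"y" suffixed
// mnemonics (vcvtpd2dqx / vcvtpd2dqy) are provided to disambiguate.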
2010 def VCVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2011 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
2012 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
2013 VEX, Sched<[WriteCvtF2I]>;
2016 def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
2017 (VCVTPD2DQrr VR128:$dst, VR128:$src), 0>;
2018 def VCVTPD2DQXrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2019 "vcvtpd2dqx\t{$src, $dst|$dst, $src}",
2020 [(set VR128:$dst,
2021 (int_x86_sse2_cvtpd2dq (loadv2f64 addr:$src)))]>, VEX,
2022 Sched<[WriteCvtF2ILd]>;
2025 def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2026 "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
2027 [(set VR128:$dst,
2028 (int_x86_avx_cvt_pd2dq_256 VR256:$src))]>, VEX, VEX_L,
2029 Sched<[WriteCvtF2I]>;
2030 def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2031 "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
2032 [(set VR128:$dst,
2033 (int_x86_avx_cvt_pd2dq_256 (loadv4f64 addr:$src)))]>,
2034 VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
2035 def : InstAlias<"vcvtpd2dq\t{$src, $dst|$dst, $src}",
2036 (VCVTPD2DQYrr VR128:$dst, VR256:$src), 0>;
2039 def CVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2040 "cvtpd2dq\t{$src, $dst|$dst, $src}",
2041 [(set VR128:$dst,
2042 (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))],
2043 IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2ILd]>;
2044 def CVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2045 "cvtpd2dq\t{$src, $dst|$dst, $src}",
2046 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))],
2047 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>;
2049 // Convert with truncation packed single/double fp to doubleword
2050 // SSE2 packed instructions with XS prefix
2051 def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2052 "cvttps2dq\t{$src, $dst|$dst, $src}",
2053 [(set VR128:$dst,
2054 (int_x86_sse2_cvttps2dq VR128:$src))],
2055 IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
2056 def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2057 "cvttps2dq\t{$src, $dst|$dst, $src}",
2058 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
2059 (loadv4f32 addr:$src)))],
2060 IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
2061 def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2062 "cvttps2dq\t{$src, $dst|$dst, $src}",
2063 [(set VR256:$dst,
2064 (int_x86_avx_cvtt_ps2dq_256 VR256:$src))],
2065 IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
2066 def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2067 "cvttps2dq\t{$src, $dst|$dst, $src}",
2068 [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256
2069 (loadv8f32 addr:$src)))],
2070 IIC_SSE_CVT_PS_RM>, VEX, VEX_L,
2071 Sched<[WriteCvtF2ILd]>;
2073 def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2074 "cvttps2dq\t{$src, $dst|$dst, $src}",
2075 [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))],
2076 IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
2077 def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2078 "cvttps2dq\t{$src, $dst|$dst, $src}",
2079 [(set VR128:$dst,
2080 (int_x86_sse2_cvttps2dq (memopv4f32 addr:$src)))],
2081 IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
2083 let Predicates = [HasAVX] in {
2084 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
2085 (VCVTDQ2PSrr VR128:$src)>;
2086 def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))),
2087 (VCVTDQ2PSrm addr:$src)>;
2089 def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
2090 (VCVTDQ2PSrr VR128:$src)>;
2091 def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (loadv2i64 addr:$src))),
2092 (VCVTDQ2PSrm addr:$src)>;
2094 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
2095 (VCVTTPS2DQrr VR128:$src)>;
2096 def : Pat<(v4i32 (fp_to_sint (loadv4f32 addr:$src))),
2097 (VCVTTPS2DQrm addr:$src)>;
2099 def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
2100 (VCVTDQ2PSYrr VR256:$src)>;
2101 def : Pat<(v8f32 (sint_to_fp (bc_v8i32 (loadv4i64 addr:$src)))),
2102 (VCVTDQ2PSYrm addr:$src)>;
2104 def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
2105 (VCVTTPS2DQYrr VR256:$src)>;
2106 def : Pat<(v8i32 (fp_to_sint (loadv8f32 addr:$src))),
2107 (VCVTTPS2DQYrm addr:$src)>;
2110 let Predicates = [UseSSE2] in {
2111 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
2112 (CVTDQ2PSrr VR128:$src)>;
2113 def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
2114 (CVTDQ2PSrm addr:$src)>;
2116 def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
2117 (CVTDQ2PSrr VR128:$src)>;
2118 def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (memopv2i64 addr:$src))),
2119 (CVTDQ2PSrm addr:$src)>;
2121 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
2122 (CVTTPS2DQrr VR128:$src)>;
2123 def : Pat<(v4i32 (fp_to_sint (memopv4f32 addr:$src))),
2124 (CVTTPS2DQrm addr:$src)>;
2127 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2128 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2129 [(set VR128:$dst,
2130 (int_x86_sse2_cvttpd2dq VR128:$src))],
2131 IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2I]>;
2133 // The assembler can recognize rr 256-bit instructions by seeing a ymm
2134 // register, but the same isn't true when using memory operands instead.
2135 // Provide other assembly rr and rm forms to address this explicitly.
2138 def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
2139 (VCVTTPD2DQrr VR128:$dst, VR128:$src), 0>;
2140 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2141 "cvttpd2dqx\t{$src, $dst|$dst, $src}",
2142 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
2143 (loadv2f64 addr:$src)))],
2144 IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2ILd]>;
2147 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2148 "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
2149 [(set VR128:$dst,
2150 (int_x86_avx_cvtt_pd2dq_256 VR256:$src))],
2151 IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
2152 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2153 "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
2154 [(set VR128:$dst,
2155 (int_x86_avx_cvtt_pd2dq_256 (loadv4f64 addr:$src)))],
2156 IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
2157 def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}",
2158 (VCVTTPD2DQYrr VR128:$dst, VR256:$src), 0>;
2160 let Predicates = [HasAVX] in {
2161 def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
2162 (VCVTTPD2DQYrr VR256:$src)>;
2163 def : Pat<(v4i32 (fp_to_sint (loadv4f64 addr:$src))),
2164 (VCVTTPD2DQYrm addr:$src)>;
2165 } // Predicates = [HasAVX]
2167 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2168 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2169 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))],
2170 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>;
2171 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
2172 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2173 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
2174 (memopv2f64 addr:$src)))],
2175 IIC_SSE_CVT_PD_RM>,
2176 Sched<[WriteCvtF2ILd]>;
2178 // Convert packed single to packed double
2179 let Predicates = [HasAVX] in {
2180 // SSE2 instructions without OpSize prefix
2181 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2182 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2183 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
2184 IIC_SSE_CVT_PD_RR>, PS, VEX, Sched<[WriteCvtF2F]>;
2185 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2186 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2187 [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
2188 IIC_SSE_CVT_PD_RM>, PS, VEX, Sched<[WriteCvtF2FLd]>;
2189 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
2190 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2191 [(set VR256:$dst,
2192 (int_x86_avx_cvt_ps2_pd_256 VR128:$src))],
2193 IIC_SSE_CVT_PD_RR>, PS, VEX, VEX_L, Sched<[WriteCvtF2F]>;
2194 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
2195 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2196 [(set VR256:$dst,
2197 (int_x86_avx_cvt_ps2_pd_256 (loadv4f32 addr:$src)))],
2198 IIC_SSE_CVT_PD_RM>, PS, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
2201 let Predicates = [UseSSE2] in {
2202 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2203 "cvtps2pd\t{$src, $dst|$dst, $src}",
2204 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
2205 IIC_SSE_CVT_PD_RR>, PS, Sched<[WriteCvtF2F]>;
2206 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2207 "cvtps2pd\t{$src, $dst|$dst, $src}",
2208 [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
2209 IIC_SSE_CVT_PD_RM>, PS, Sched<[WriteCvtF2FLd]>;
2212 // Convert Packed DW Integers to Packed Double FP
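// Note that the 128-bit form only converts the two low doubleword integers to
// two doubles, so its memory operand is i64mem (a 64-bit load), while the
// 256-bit form converts four doublewords and loads 128 bits.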
2213 let Predicates = [HasAVX] in {
2214 let hasSideEffects = 0, mayLoad = 1 in
2215 def VCVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2216 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2217 []>, VEX, Sched<[WriteCvtI2FLd]>;
2218 def VCVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2219 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2220 [(set VR128:$dst,
2221 (int_x86_sse2_cvtdq2pd VR128:$src))]>, VEX,
2222 Sched<[WriteCvtI2F]>;
2223 def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
2224 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2225 [(set VR256:$dst,
2226 (int_x86_avx_cvtdq2_pd_256
2227 (bitconvert (loadv2i64 addr:$src))))]>, VEX, VEX_L,
2228 Sched<[WriteCvtI2FLd]>;
2229 def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
2230 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2231 [(set VR256:$dst,
2232 (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX, VEX_L,
2233 Sched<[WriteCvtI2F]>;
2236 let hasSideEffects = 0, mayLoad = 1 in
2237 def CVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2238 "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
2239 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtI2FLd]>;
2240 def CVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2241 "cvtdq2pd\t{$src, $dst|$dst, $src}",
2242 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))],
2243 IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtI2F]>;
2245 // AVX 256-bit register conversion intrinsics
2246 let Predicates = [HasAVX] in {
2247 def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
2248 (VCVTDQ2PDYrr VR128:$src)>;
2249 def : Pat<(v4f64 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))),
2250 (VCVTDQ2PDYrm addr:$src)>;
2251 } // Predicates = [HasAVX]
2253 // Convert packed double to packed single
2254 // The assembler can recognize rr 256-bit instructions by seeing a ymm
2255 // register, but the same isn't true when using memory operands instead.
2256 // Provide other assembly rr and rm forms to address this explicitly.
2257 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2258 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2259 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
2260 IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2F]>;
2263 def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
2264 (VCVTPD2PSrr VR128:$dst, VR128:$src), 0>;
2265 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2266 "cvtpd2psx\t{$src, $dst|$dst, $src}",
2267 [(set VR128:$dst,
2268 (int_x86_sse2_cvtpd2ps (loadv2f64 addr:$src)))],
2269 IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2FLd]>;
2272 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2273 "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
2274 [(set VR128:$dst,
2275 (int_x86_avx_cvt_pd2_ps_256 VR256:$src))],
2276 IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2F]>;
2277 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2278 "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
2279 [(set VR128:$dst,
2280 (int_x86_avx_cvt_pd2_ps_256 (loadv4f64 addr:$src)))],
2281 IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
2282 def : InstAlias<"vcvtpd2ps\t{$src, $dst|$dst, $src}",
2283 (VCVTPD2PSYrr VR128:$dst, VR256:$src), 0>;
2285 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2286 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2287 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
2288 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2F]>;
2289 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2290 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2291 [(set VR128:$dst,
2292 (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))],
2293 IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2FLd]>;
2296 // AVX 256-bit register conversion intrinsics
2297 // FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
2298 // whenever possible to avoid declaring two versions of each one.
2299 let Predicates = [HasAVX] in {
2300 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
2301 (VCVTDQ2PSYrr VR256:$src)>;
2302 def : Pat<(int_x86_avx_cvtdq2_ps_256 (bitconvert (loadv4i64 addr:$src))),
2303 (VCVTDQ2PSYrm addr:$src)>;
2305 // Match fround and fextend for 128/256-bit conversions
2306 def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
2307 (VCVTPD2PSrr VR128:$src)>;
2308 def : Pat<(v4f32 (X86vfpround (loadv2f64 addr:$src))),
2309 (VCVTPD2PSXrm addr:$src)>;
2310 def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
2311 (VCVTPD2PSYrr VR256:$src)>;
2312 def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
2313 (VCVTPD2PSYrm addr:$src)>;
2315 def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
2316 (VCVTPS2PDrr VR128:$src)>;
2317 def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
2318 (VCVTPS2PDYrr VR128:$src)>;
2319 def : Pat<(v4f64 (extloadv4f32 addr:$src)),
2320 (VCVTPS2PDYrm addr:$src)>;
2323 let Predicates = [UseSSE2] in {
2324 // Match fround and fextend for 128 conversions
2325 def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
2326 (CVTPD2PSrr VR128:$src)>;
2327 def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))),
2328 (CVTPD2PSrm addr:$src)>;
2330 def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
2331 (CVTPS2PDrr VR128:$src)>;
2334 //===----------------------------------------------------------------------===//
2335 // SSE 1 & 2 - Compare Instructions
2336 //===----------------------------------------------------------------------===//
2338 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
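// The $cc operand is the predicate immediate of cmpss/cmpsd: 0=EQ, 1=LT,
// 2=LE, 3=UNORD, 4=NEQ, 5=NLT, 6=NLE, 7=ORD. The SSE encodings only accept
// 0-7 (hence i8immZExt3 below), while the VEX-encoded forms accept the
// extended 0-31 range (i8immZExt5).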
2339 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
2340 Operand CC, SDNode OpNode, ValueType VT,
2341 PatFrag ld_frag, string asm, string asm_alt,
2342 OpndItins itins, ImmLeaf immLeaf> {
2343 def rr : SIi8<0xC2, MRMSrcReg,
2344 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
2345 [(set RC:$dst, (OpNode (VT RC:$src1), RC:$src2, immLeaf:$cc))],
2346 itins.rr>, Sched<[itins.Sched]>;
2347 def rm : SIi8<0xC2, MRMSrcMem,
2348 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
2349 [(set RC:$dst, (OpNode (VT RC:$src1),
2350 (ld_frag addr:$src2), immLeaf:$cc))],
2351 itins.rm>,
2352 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2354 // Accept explicit immediate argument form instead of comparison code.
2355 let isAsmParserOnly = 1, hasSideEffects = 0 in {
2356 def rr_alt : SIi8<0xC2, MRMSrcReg, (outs RC:$dst),
2357 (ins RC:$src1, RC:$src2, u8imm:$cc), asm_alt, [],
2358 IIC_SSE_ALU_F32S_RR>, Sched<[itins.Sched]>;
2360 def rm_alt : SIi8<0xC2, MRMSrcMem, (outs RC:$dst),
2361 (ins RC:$src1, x86memop:$src2, u8imm:$cc), asm_alt, [],
2362 IIC_SSE_ALU_F32S_RM>,
2363 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2367 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, AVXCC, X86cmps, f32, loadf32,
2368 "cmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2369 "cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2370 SSE_ALU_F32S, i8immZExt5>, XS, VEX_4V, VEX_LIG;
2371 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, AVXCC, X86cmps, f64, loadf64,
2372 "cmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2373 "cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2374 SSE_ALU_F32S, i8immZExt5>, // same latency as 32 bit compare
2375 XD, VEX_4V, VEX_LIG;
2377 let Constraints = "$src1 = $dst" in {
2378 defm CMPSS : sse12_cmp_scalar<FR32, f32mem, SSECC, X86cmps, f32, loadf32,
2379 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
2380 "cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}", SSE_ALU_F32S,
2381 i8immZExt3>, XS;
2382 defm CMPSD : sse12_cmp_scalar<FR64, f64mem, SSECC, X86cmps, f64, loadf64,
2383 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
2384 "cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2385 SSE_ALU_F64S, i8immZExt3>, XD;
2388 multiclass sse12_cmp_scalar_int<X86MemOperand x86memop, Operand CC,
2389 Intrinsic Int, string asm, OpndItins itins,
2390 ImmLeaf immLeaf> {
2391 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
2392 (ins VR128:$src1, VR128:$src, CC:$cc), asm,
2393 [(set VR128:$dst, (Int VR128:$src1,
2394 VR128:$src, immLeaf:$cc))],
2396 Sched<[itins.Sched]>;
2397 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
2398 (ins VR128:$src1, x86memop:$src, CC:$cc), asm,
2399 [(set VR128:$dst, (Int VR128:$src1,
2400 (load addr:$src), immLeaf:$cc))],
2402 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2405 let isCodeGenOnly = 1 in {
2406 // Aliases to match intrinsics which expect XMM operand(s).
2407 defm Int_VCMPSS : sse12_cmp_scalar_int<f32mem, AVXCC, int_x86_sse_cmp_ss,
2408 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
2409 SSE_ALU_F32S, i8immZExt5>,
2410 XS, VEX_4V;
2411 defm Int_VCMPSD : sse12_cmp_scalar_int<f64mem, AVXCC, int_x86_sse2_cmp_sd,
2412 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
2413 SSE_ALU_F32S, i8immZExt5>, // same latency as f32
2414 XD, VEX_4V;
2415 let Constraints = "$src1 = $dst" in {
2416 defm Int_CMPSS : sse12_cmp_scalar_int<f32mem, SSECC, int_x86_sse_cmp_ss,
2417 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
2418 SSE_ALU_F32S, i8immZExt3>, XS;
2419 defm Int_CMPSD : sse12_cmp_scalar_int<f64mem, SSECC, int_x86_sse2_cmp_sd,
2420 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
2421 SSE_ALU_F64S, i8immZExt3>,
2422 XD;
2427 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
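// ucomiss/ucomisd perform an unordered compare (they signal #IA only on a
// signaling NaN), while comiss/comisd signal on any NaN; both set ZF/PF/CF
// in EFLAGS and clear OF/SF/AF.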
2428 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
2429 ValueType vt, X86MemOperand x86memop,
2430 PatFrag ld_frag, string OpcodeStr> {
2431 def rr: SI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
2432 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2433 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))],
2434 IIC_SSE_COMIS_RR>,
2435 Sched<[WriteFAdd]>;
2436 def rm: SI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
2437 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2438 [(set EFLAGS, (OpNode (vt RC:$src1),
2439 (ld_frag addr:$src2)))],
2440 IIC_SSE_COMIS_RM>,
2441 Sched<[WriteFAddLd, ReadAfterLd]>;
2444 let Defs = [EFLAGS] in {
2445 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
2446 "ucomiss">, PS, VEX, VEX_LIG;
2447 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
2448 "ucomisd">, PD, VEX, VEX_LIG;
2449 let Pattern = []<dag> in {
2450 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
2451 "comiss">, PS, VEX, VEX_LIG;
2452 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
2453 "comisd">, PD, VEX, VEX_LIG;
2456 let isCodeGenOnly = 1 in {
2457 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
2458 load, "ucomiss">, PS, VEX;
2459 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
2460 load, "ucomisd">, PD, VEX;
2462 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
2463 load, "comiss">, PS, VEX;
2464 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
2465 load, "comisd">, PD, VEX;
2467 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
2468 "ucomiss">, PS;
2469 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
2470 "ucomisd">, PD;
2472 let Pattern = []<dag> in {
2473 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
2474 "comiss">, PS;
2475 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
2476 "comisd">, PD;
2479 let isCodeGenOnly = 1 in {
2480 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
2481 load, "ucomiss">, PS;
2482 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
2483 load, "ucomisd">, PD;
2485 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
2486 "comiss">, PS;
2487 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
2488 "comisd">, PD;
2490 } // Defs = [EFLAGS]
2492 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
2493 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
2494 Operand CC, Intrinsic Int, string asm,
2495 string asm_alt, Domain d, ImmLeaf immLeaf,
2496 PatFrag ld_frag, OpndItins itins = SSE_ALU_F32P> {
2497 let isCommutable = 1 in
2498 def rri : PIi8<0xC2, MRMSrcReg,
2499 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
2500 [(set RC:$dst, (Int RC:$src1, RC:$src2, immLeaf:$cc))],
2501 itins.rr, d>,
2502 Sched<[WriteFAdd]>;
2503 def rmi : PIi8<0xC2, MRMSrcMem,
2504 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
2505 [(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2), immLeaf:$cc))],
2506 itins.rm, d>,
2507 Sched<[WriteFAddLd, ReadAfterLd]>;
2509 // Accept explicit immediate argument form instead of comparison code.
2510 let isAsmParserOnly = 1, hasSideEffects = 0 in {
2511 def rri_alt : PIi8<0xC2, MRMSrcReg,
2512 (outs RC:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc),
2513 asm_alt, [], itins.rr, d>, Sched<[WriteFAdd]>;
2515 def rmi_alt : PIi8<0xC2, MRMSrcMem,
2516 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc),
2517 asm_alt, [], itins.rm, d>,
2518 Sched<[WriteFAddLd, ReadAfterLd]>;
2522 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse_cmp_ps,
2523 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2524 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2525 SSEPackedSingle, i8immZExt5, loadv4f32>, PS, VEX_4V;
2526 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse2_cmp_pd,
2527 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2528 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2529 SSEPackedDouble, i8immZExt5, loadv2f64>, PD, VEX_4V;
2530 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_ps_256,
2531 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2532 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2533 SSEPackedSingle, i8immZExt5, loadv8f32>, PS, VEX_4V, VEX_L;
2534 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_pd_256,
2535 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2536 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2537 SSEPackedDouble, i8immZExt5, loadv4f64>, PD, VEX_4V, VEX_L;
2538 let Constraints = "$src1 = $dst" in {
2539 defm CMPPS : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse_cmp_ps,
2540 "cmp${cc}ps\t{$src2, $dst|$dst, $src2}",
2541 "cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2542 SSEPackedSingle, i8immZExt5, memopv4f32, SSE_ALU_F32P>, PS;
2543 defm CMPPD : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse2_cmp_pd,
2544 "cmp${cc}pd\t{$src2, $dst|$dst, $src2}",
2545 "cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2546 SSEPackedDouble, i8immZExt5, memopv2f64, SSE_ALU_F64P>, PD;
2549 let Predicates = [HasAVX] in {
2550 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2551 (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2552 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (loadv4f32 addr:$src2), imm:$cc)),
2553 (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
2554 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2555 (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2556 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (loadv2f64 addr:$src2), imm:$cc)),
2557 (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2559 def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
2560 (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
2561 def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), (loadv8f32 addr:$src2), imm:$cc)),
2562 (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
2563 def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
2564 (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
2565 def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), (loadv4f64 addr:$src2), imm:$cc)),
2566 (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
2569 let Predicates = [UseSSE1] in {
2570 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2571 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2572 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memopv4f32 addr:$src2), imm:$cc)),
2573 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
2576 let Predicates = [UseSSE2] in {
2577 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2578 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2579 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memopv2f64 addr:$src2), imm:$cc)),
2580 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2583 //===----------------------------------------------------------------------===//
2584 // SSE 1 & 2 - Shuffle Instructions
2585 //===----------------------------------------------------------------------===//
2587 /// sse12_shuffle - sse 1 & 2 fp shuffle instructions
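/// For shufps, each 2-bit field of the $src3 immediate selects one element:
/// the low two fields pick two elements from $src1 for the low half of $dst,
/// and the high two fields pick two elements from $src2 for the high half.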
2588 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
2589 ValueType vt, string asm, PatFrag mem_frag,
2590 Domain d> {
2591 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
2592 (ins RC:$src1, x86memop:$src2, u8imm:$src3), asm,
2593 [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
2594 (i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
2595 Sched<[WriteFShuffleLd, ReadAfterLd]>;
2596 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
2597 (ins RC:$src1, RC:$src2, u8imm:$src3), asm,
2598 [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
2599 (i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
2600 Sched<[WriteFShuffle]>;
2603 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2604 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2605 loadv4f32, SSEPackedSingle>, PS, VEX_4V;
2606 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
2607 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2608 loadv8f32, SSEPackedSingle>, PS, VEX_4V, VEX_L;
2609 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2610 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2611 loadv2f64, SSEPackedDouble>, PD, VEX_4V;
2612 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
2613 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2614 loadv4f64, SSEPackedDouble>, PD, VEX_4V, VEX_L;
2616 let Constraints = "$src1 = $dst" in {
2617 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2618 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2619 memopv4f32, SSEPackedSingle>, PS;
2620 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2621 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2622 memopv2f64, SSEPackedDouble>, PD;
2625 let Predicates = [HasAVX] in {
2626 def : Pat<(v4i32 (X86Shufp VR128:$src1,
2627 (bc_v4i32 (loadv2i64 addr:$src2)), (i8 imm:$imm))),
2628 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2629 def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2630 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
2632 def : Pat<(v2i64 (X86Shufp VR128:$src1,
2633 (loadv2i64 addr:$src2), (i8 imm:$imm))),
2634 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
2635 def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2636 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
2639 def : Pat<(v8i32 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2640 (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2641 def : Pat<(v8i32 (X86Shufp VR256:$src1,
2642 (bc_v8i32 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
2643 (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;
2645 def : Pat<(v4i64 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2646 (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2647 def : Pat<(v4i64 (X86Shufp VR256:$src1,
2648 (loadv4i64 addr:$src2), (i8 imm:$imm))),
2649 (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
2652 let Predicates = [UseSSE1] in {
2653 def : Pat<(v4i32 (X86Shufp VR128:$src1,
2654 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
2655 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2656 def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2657 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
2660 let Predicates = [UseSSE2] in {
2661 // Generic SHUFPD patterns
2662 def : Pat<(v2i64 (X86Shufp VR128:$src1,
2663 (memopv2i64 addr:$src2), (i8 imm:$imm))),
2664 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
2665 def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2666 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
2669 //===----------------------------------------------------------------------===//
2670 // SSE 1 & 2 - Unpack FP Instructions
2671 //===----------------------------------------------------------------------===//
2673 /// sse12_unpack_interleave - sse 1 & 2 fp unpack and interleave
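/// unpcklps/unpcklpd interleave the low-half elements of the two sources and
/// unpckhps/unpckhpd interleave the high-half elements (per 128-bit lane for
/// the 256-bit AVX forms).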
2674 multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
2675 PatFrag mem_frag, RegisterClass RC,
2676 X86MemOperand x86memop, string asm,
2677 Domain d> {
2678 def rr : PI<opc, MRMSrcReg,
2679 (outs RC:$dst), (ins RC:$src1, RC:$src2),
2680 asm, [(set RC:$dst,
2681 (vt (OpNode RC:$src1, RC:$src2)))],
2682 IIC_SSE_UNPCK, d>, Sched<[WriteFShuffle]>;
2683 def rm : PI<opc, MRMSrcMem,
2684 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2685 asm, [(set RC:$dst,
2686 (vt (OpNode RC:$src1,
2687 (mem_frag addr:$src2))))],
2688 IIC_SSE_UNPCK, d>,
2689 Sched<[WriteFShuffleLd, ReadAfterLd]>;
2692 defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, loadv4f32,
2693 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2694 SSEPackedSingle>, PS, VEX_4V;
2695 defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, loadv2f64,
2696 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2697 SSEPackedDouble>, PD, VEX_4V;
2698 defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, loadv4f32,
2699 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2700 SSEPackedSingle>, PS, VEX_4V;
2701 defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, loadv2f64,
2702 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2703 SSEPackedDouble>, PD, VEX_4V;
2705 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, loadv8f32,
2706 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2707 SSEPackedSingle>, PS, VEX_4V, VEX_L;
2708 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, loadv4f64,
2709 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2710 SSEPackedDouble>, PD, VEX_4V, VEX_L;
2711 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, loadv8f32,
2712 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2713 SSEPackedSingle>, PS, VEX_4V, VEX_L;
2714 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, loadv4f64,
2715 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2716 SSEPackedDouble>, PD, VEX_4V, VEX_L;
2718 let Constraints = "$src1 = $dst" in {
2719 defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
2720 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
2721 SSEPackedSingle>, PS;
2722 defm UNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
2723 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
2724 SSEPackedDouble>, PD;
2725 defm UNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
2726 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
2727 SSEPackedSingle>, PS;
2728 defm UNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
2729 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
2730 SSEPackedDouble>, PD;
2731 } // Constraints = "$src1 = $dst"
2733 let Predicates = [HasAVX1Only] in {
2734 def : Pat<(v8i32 (X86Unpckl VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
2735 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
2736 def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)),
2737 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
2738 def : Pat<(v8i32 (X86Unpckh VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
2739 (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
2740 def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)),
2741 (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
2743 def : Pat<(v4i64 (X86Unpckl VR256:$src1, (loadv4i64 addr:$src2))),
2744 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
2745 def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)),
2746 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
2747 def : Pat<(v4i64 (X86Unpckh VR256:$src1, (loadv4i64 addr:$src2))),
2748 (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
2749 def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)),
2750 (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
2753 //===----------------------------------------------------------------------===//
2754 // SSE 1 & 2 - Extract Floating-Point Sign mask
2755 //===----------------------------------------------------------------------===//
/// sse12_extr_sign_mask - sse 1 & 2 packed FP sign mask extraction
2758 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
2760 def rr : PI<0x50, MRMSrcReg, (outs GR32orGR64:$dst), (ins RC:$src),
2761 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
2762 [(set GR32orGR64:$dst, (Int RC:$src))], IIC_SSE_MOVMSK, d>,
2763 Sched<[WriteVecLogic]>;
2766 let Predicates = [HasAVX] in {
2767 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
2768 "movmskps", SSEPackedSingle>, PS, VEX;
2769 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
2770 "movmskpd", SSEPackedDouble>, PD, VEX;
2771 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
2772 "movmskps", SSEPackedSingle>, PS,
2774 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
2775 "movmskpd", SSEPackedDouble>, PD,
2778 def : Pat<(i32 (X86fgetsign FR32:$src)),
2779 (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
2780 def : Pat<(i64 (X86fgetsign FR32:$src)),
2781 (SUBREG_TO_REG (i64 0),
2782 (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>;
2783 def : Pat<(i32 (X86fgetsign FR64:$src)),
2784 (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
2785 def : Pat<(i64 (X86fgetsign FR64:$src)),
2786 (SUBREG_TO_REG (i64 0),
2787 (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>;
2790 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
2791 SSEPackedSingle>, PS;
2792 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
2793 SSEPackedDouble>, PD;
2795 def : Pat<(i32 (X86fgetsign FR32:$src)),
2796 (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>,
2797 Requires<[UseSSE1]>;
2798 def : Pat<(i64 (X86fgetsign FR32:$src)),
2799 (SUBREG_TO_REG (i64 0),
2800 (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>,
2801 Requires<[UseSSE1]>;
2802 def : Pat<(i32 (X86fgetsign FR64:$src)),
2803 (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>,
2804 Requires<[UseSSE2]>;
2805 def : Pat<(i64 (X86fgetsign FR64:$src)),
2806 (SUBREG_TO_REG (i64 0),
2807 (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>,
2808 Requires<[UseSSE2]>;
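// As a rough C-level sketch (assuming <immintrin.h>): movmskps copies the sign
// bit of each packed element into the low bits of a GPR, which is also why the
// X86fgetsign patterns above can reuse it once the scalar is copied to VR128.
//
//   #include <immintrin.h>
//   int sign_bits(__m128 v) {
//     return _mm_movemask_ps(v);   // bit i = sign bit of element i (4 bits used)
//   }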
2810 //===---------------------------------------------------------------------===//
2811 // SSE2 - Packed Integer Logical Instructions
2812 //===---------------------------------------------------------------------===//
2814 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2816 /// PDI_binop_rm - Simple SSE2 binary operator.
2817 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2818 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
2819 X86MemOperand x86memop, OpndItins itins,
2820 bit IsCommutable, bit Is2Addr> {
2821 let isCommutable = IsCommutable in
2822 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
2823 (ins RC:$src1, RC:$src2),
2825 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2826 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2827 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
2828 Sched<[itins.Sched]>;
2829 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
2830 (ins RC:$src1, x86memop:$src2),
2832 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2833 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2834 [(set RC:$dst, (OpVT (OpNode RC:$src1,
2835 (bitconvert (memop_frag addr:$src2)))))],
2837 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2839 } // ExeDomain = SSEPackedInt
2841 multiclass PDI_binop_all<bits<8> opc, string OpcodeStr, SDNode Opcode,
2842 ValueType OpVT128, ValueType OpVT256,
2843 OpndItins itins, bit IsCommutable = 0> {
2844 let Predicates = [HasAVX, NoVLX] in
2845 defm V#NAME : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode, OpVT128,
2846 VR128, loadv2i64, i128mem, itins, IsCommutable, 0>, VEX_4V;
2848 let Constraints = "$src1 = $dst" in
2849 defm NAME : PDI_binop_rm<opc, OpcodeStr, Opcode, OpVT128, VR128,
2850 memopv2i64, i128mem, itins, IsCommutable, 1>;
2852 let Predicates = [HasAVX2, NoVLX] in
2853 defm V#NAME#Y : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode,
2854 OpVT256, VR256, loadv4i64, i256mem, itins,
2855 IsCommutable, 0>, VEX_4V, VEX_L;
// These are ordered here to satisfy pattern ordering requirements with the FP
// logical instruction versions below.
2860 defm PAND : PDI_binop_all<0xDB, "pand", and, v2i64, v4i64,
2861 SSE_VEC_BIT_ITINS_P, 1>;
2862 defm POR : PDI_binop_all<0xEB, "por", or, v2i64, v4i64,
2863 SSE_VEC_BIT_ITINS_P, 1>;
2864 defm PXOR : PDI_binop_all<0xEF, "pxor", xor, v2i64, v4i64,
2865 SSE_VEC_BIT_ITINS_P, 1>;
2866 defm PANDN : PDI_binop_all<0xDF, "pandn", X86andnp, v2i64, v4i64,
2867 SSE_VEC_BIT_ITINS_P, 0>;
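// Note that PANDN computes (~src1) & src2, which is what X86andnp models. A
// short C sketch of a typical bit-select use (assuming <immintrin.h>):
//
//   #include <immintrin.h>
//   __m128i bit_select(__m128i mask, __m128i a, __m128i b) {
//     // (mask & a) | (~mask & b); the second term maps to pandn
//     return _mm_or_si128(_mm_and_si128(mask, a), _mm_andnot_si128(mask, b));
//   }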
2869 //===----------------------------------------------------------------------===//
2870 // SSE 1 & 2 - Logical Instructions
2871 //===----------------------------------------------------------------------===//
2873 // Multiclass for scalars using the X86 logical operation aliases for FP.
2874 multiclass sse12_fp_packed_scalar_logical_alias<
2875 bits<8> opc, string OpcodeStr, SDNode OpNode, OpndItins itins> {
2876 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2877 FR32, f32, f128mem, loadf32_128, SSEPackedSingle, itins, 0>,
2880 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2881 FR64, f64, f128mem, loadf64_128, SSEPackedDouble, itins, 0>,
2884 let Constraints = "$src1 = $dst" in {
2885 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
2886 f32, f128mem, memopfsf32_128, SSEPackedSingle, itins>, PS;
2888 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
2889 f64, f128mem, memopfsf64_128, SSEPackedDouble, itins>, PD;
2893 let isCodeGenOnly = 1 in {
2894 defm FsAND : sse12_fp_packed_scalar_logical_alias<0x54, "and", X86fand,
2896 defm FsOR : sse12_fp_packed_scalar_logical_alias<0x56, "or", X86for,
2898 defm FsXOR : sse12_fp_packed_scalar_logical_alias<0x57, "xor", X86fxor,
2901 let isCommutable = 0 in
2902 defm FsANDN : sse12_fp_packed_scalar_logical_alias<0x55, "andn", X86fandn,
2906 // Multiclass for vectors using the X86 logical operation aliases for FP.
2907 multiclass sse12_fp_packed_vector_logical_alias<
2908 bits<8> opc, string OpcodeStr, SDNode OpNode, OpndItins itins> {
2909 let Predicates = [HasAVX, NoVLX] in {
2910 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2911 VR128, v4f32, f128mem, loadv4f32, SSEPackedSingle, itins, 0>,
2914 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2915 VR128, v2f64, f128mem, loadv2f64, SSEPackedDouble, itins, 0>,
2919 let Constraints = "$src1 = $dst" in {
2920 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
2921 v4f32, f128mem, memopv4f32, SSEPackedSingle, itins>,
2924 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
2925 v2f64, f128mem, memopv2f64, SSEPackedDouble, itins>,
2930 let isCodeGenOnly = 1 in {
2931 defm FvAND : sse12_fp_packed_vector_logical_alias<0x54, "and", X86fand,
2933 defm FvOR : sse12_fp_packed_vector_logical_alias<0x56, "or", X86for,
2935 defm FvXOR : sse12_fp_packed_vector_logical_alias<0x57, "xor", X86fxor,
2938 let isCommutable = 0 in
2939 defm FvANDN : sse12_fp_packed_vector_logical_alias<0x55, "andn", X86fandn,
2943 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
2945 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
2947 let Predicates = [HasAVX, NoVLX] in {
2948 defm V#NAME#PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
2949 !strconcat(OpcodeStr, "ps"), f256mem,
2950 [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
2951 [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
2952 (loadv4i64 addr:$src2)))], 0>, PS, VEX_4V, VEX_L;
2954 defm V#NAME#PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
2955 !strconcat(OpcodeStr, "pd"), f256mem,
2956 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2957 (bc_v4i64 (v4f64 VR256:$src2))))],
2958 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2959 (loadv4i64 addr:$src2)))], 0>,
// In AVX there is no need to add a pattern for the 128-bit logical rr ps form,
// because those operations are all promoted to v2i64 and are already covered by
// the integer patterns. The extra pattern is needed only for SSE, because
// v2i64 is not supported on SSE1, only on SSE2.
2966 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2967 !strconcat(OpcodeStr, "ps"), f128mem, [],
2968 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2969 (loadv2i64 addr:$src2)))], 0>, PS, VEX_4V;
2971 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2972 !strconcat(OpcodeStr, "pd"), f128mem,
2973 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2974 (bc_v2i64 (v2f64 VR128:$src2))))],
2975 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2976 (loadv2i64 addr:$src2)))], 0>,
2980 let Constraints = "$src1 = $dst" in {
2981 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2982 !strconcat(OpcodeStr, "ps"), f128mem,
2983 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
2984 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2985 (memopv2i64 addr:$src2)))]>, PS;
2987 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2988 !strconcat(OpcodeStr, "pd"), f128mem,
2989 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2990 (bc_v2i64 (v2f64 VR128:$src2))))],
2991 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2992 (memopv2i64 addr:$src2)))]>, PD;
2996 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
2997 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
2998 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
2999 let isCommutable = 0 in
3000 defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
// AVX1 requires type coercions in order to fold loads directly into logical
// instructions.
3004 let Predicates = [HasAVX1Only] in {
3005 def : Pat<(bc_v8f32 (and VR256:$src1, (loadv4i64 addr:$src2))),
3006 (VANDPSYrm VR256:$src1, addr:$src2)>;
3007 def : Pat<(bc_v8f32 (or VR256:$src1, (loadv4i64 addr:$src2))),
3008 (VORPSYrm VR256:$src1, addr:$src2)>;
3009 def : Pat<(bc_v8f32 (xor VR256:$src1, (loadv4i64 addr:$src2))),
3010 (VXORPSYrm VR256:$src1, addr:$src2)>;
3011 def : Pat<(bc_v8f32 (X86andnp VR256:$src1, (loadv4i64 addr:$src2))),
3012 (VANDNPSYrm VR256:$src1, addr:$src2)>;
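// A rough C-level sketch of the case covered above (assuming <immintrin.h>):
// on AVX1 a 256-bit bitwise op with a memory operand is selected through the
// FP logical forms, e.g. vandps with a folded load.
//
//   #include <immintrin.h>
//   __m256 masked(__m256 a, const __m256 *p) {
//     return _mm256_and_ps(a, *p);
//   }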
3015 //===----------------------------------------------------------------------===//
3016 // SSE 1 & 2 - Arithmetic Instructions
3017 //===----------------------------------------------------------------------===//
/// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
/// vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements unmodified (therefore these cannot be commuted).
///
/// These three forms can each be reg+reg or reg+mem.
///
/// FIXME: once all 256-bit intrinsics are matched, clean up and refactor those
/// classes below.
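// As a rough C-level sketch of the packed, plain scalar, and intrinsic scalar
// forms described above (assuming <immintrin.h>):
//
//   #include <immintrin.h>
//   float plain_scalar(float x, float y) { return x + y; }  // selects addss
//   __m128 forms(__m128 a, __m128 b) {
//     __m128 p = _mm_add_ps(a, b);   // packed form: all four lanes
//     __m128 s = _mm_add_ss(a, b);   // intrinsic scalar form: lane 0 = a0 + b0,
//                                    // upper lanes taken unmodified from a
//     return _mm_mul_ps(p, s);
//   }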
3032 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr,
3033 SDNode OpNode, SizeItins itins> {
3034 let Predicates = [HasAVX, NoVLX] in {
3035 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
3036 VR128, v4f32, f128mem, loadv4f32,
3037 SSEPackedSingle, itins.s, 0>, PS, VEX_4V;
3038 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
3039 VR128, v2f64, f128mem, loadv2f64,
3040 SSEPackedDouble, itins.d, 0>, PD, VEX_4V;
3042 defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"),
3043 OpNode, VR256, v8f32, f256mem, loadv8f32,
3044 SSEPackedSingle, itins.s, 0>, PS, VEX_4V, VEX_L;
3045 defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"),
3046 OpNode, VR256, v4f64, f256mem, loadv4f64,
3047 SSEPackedDouble, itins.d, 0>, PD, VEX_4V, VEX_L;
3050 let Constraints = "$src1 = $dst" in {
3051 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
3052 v4f32, f128mem, memopv4f32, SSEPackedSingle,
3054 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
3055 v2f64, f128mem, memopv2f64, SSEPackedDouble,
3060 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
3062 defm V#NAME#SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
3063 OpNode, FR32, f32mem, itins.s, 0>, XS, VEX_4V, VEX_LIG;
3064 defm V#NAME#SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
3065 OpNode, FR64, f64mem, itins.d, 0>, XD, VEX_4V, VEX_LIG;
3067 let Constraints = "$src1 = $dst" in {
3068 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
3069 OpNode, FR32, f32mem, itins.s>, XS;
3070 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
3071 OpNode, FR64, f64mem, itins.d>, XD;
3075 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
3077 defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3078 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
3079 itins.s, 0>, XS, VEX_4V, VEX_LIG;
3080 defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3081 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
3082 itins.d, 0>, XD, VEX_4V, VEX_LIG;
3084 let Constraints = "$src1 = $dst" in {
3085 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3086 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
3088 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3089 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
3094 // Binary Arithmetic instructions
3095 defm ADD : basic_sse12_fp_binop_p<0x58, "add", fadd, SSE_ALU_ITINS_P>,
3096 basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>,
3097 basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S>;
3098 defm MUL : basic_sse12_fp_binop_p<0x59, "mul", fmul, SSE_MUL_ITINS_P>,
3099 basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S>,
3100 basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S>;
3101 let isCommutable = 0 in {
3102 defm SUB : basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P>,
3103 basic_sse12_fp_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>,
3104 basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S>;
3105 defm DIV : basic_sse12_fp_binop_p<0x5E, "div", fdiv, SSE_DIV_ITINS_P>,
3106 basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S>,
3107 basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S>;
3108 defm MAX : basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SSE_ALU_ITINS_P>,
3109 basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>,
3110 basic_sse12_fp_binop_s_int<0x5F, "max", SSE_ALU_ITINS_S>;
3111 defm MIN : basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SSE_ALU_ITINS_P>,
3112 basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>,
3113 basic_sse12_fp_binop_s_int<0x5D, "min", SSE_ALU_ITINS_S>;
3116 let isCodeGenOnly = 1 in {
3117 defm MAXC: basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>,
3118 basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S>;
3119 defm MINC: basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>,
3120 basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S>;
// Patterns used to select SSE scalar fp arithmetic instructions from
// either:
//
// (1) a scalar fp operation followed by a blend
//
// The effect is that the backend no longer emits unnecessary vector
// insert instructions immediately after SSE scalar fp instructions
// like addss or mulss.
//
// For example, given the following code:
//   __m128 foo(__m128 A, __m128 B) {
//     A[0] += B[0];
//     return A;
//   }
//
// Previously we generated:
//   addss %xmm0, %xmm1
//   movss %xmm1, %xmm0
//
// We now generate:
//   addss %xmm1, %xmm0
//
// (2) a vector packed single/double fp operation followed by a vector insert
//
// The effect is that the backend converts the packed fp instruction
// followed by a vector insert into a single SSE scalar fp instruction.
//
// For example, given the following code:
//   __m128 foo(__m128 A, __m128 B) {
//     __m128 C = A + B;
//     return (__m128) {C[0], A[1], A[2], A[3]};
//   }
//
// Previously we generated:
//   addps %xmm0, %xmm1
//   movss %xmm1, %xmm0
//
// We now generate:
//   addss %xmm1, %xmm0
3163 // TODO: Some canonicalization in lowering would simplify the number of
3164 // patterns we have to try to match.
3165 multiclass scalar_math_f32_patterns<SDNode Op, string OpcPrefix> {
3166 let Predicates = [UseSSE1] in {
3167 // extracted scalar math op with insert via movss
3168 def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
3169 (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
3171 (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst,
3172 (COPY_TO_REGCLASS FR32:$src, VR128))>;
3174 // vector math op with insert via movss
3175 def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
3176 (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
3177 (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
// With SSE 4.1, insertps/blendi are preferred to movss, so match those too.
3181 let Predicates = [UseSSE41] in {
3182 // extracted scalar math op with insert via insertps
3183 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
3184 (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
3185 FR32:$src))), (iPTR 0))),
3186 (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst,
3187 (COPY_TO_REGCLASS FR32:$src, VR128))>;
3189 // extracted scalar math op with insert via blend
3190 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
3191 (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
3192 FR32:$src))), (i8 1))),
3193 (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst,
3194 (COPY_TO_REGCLASS FR32:$src, VR128))>;
3196 // vector math op with insert via blend
3197 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
3198 (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
3199 (!cast<I>(OpcPrefix#SSrr_Int)v4f32:$dst, v4f32:$src)>;
// Repeat everything for AVX, except for the movss + extracted-scalar
// combination, because that pattern is not expected to occur with AVX codegen.
3205 let Predicates = [HasAVX] in {
3206 // extracted scalar math op with insert via insertps
3207 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
3208 (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
3209 FR32:$src))), (iPTR 0))),
3210 (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst,
3211 (COPY_TO_REGCLASS FR32:$src, VR128))>;
3213 // extracted scalar math op with insert via blend
3214 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
3215 (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
3216 FR32:$src))), (i8 1))),
3217 (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst,
3218 (COPY_TO_REGCLASS FR32:$src, VR128))>;
3220 // vector math op with insert via movss
3221 def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
3222 (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
3223 (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
3225 // vector math op with insert via blend
3226 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
3227 (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
3228 (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
3232 defm : scalar_math_f32_patterns<fadd, "ADD">;
3233 defm : scalar_math_f32_patterns<fsub, "SUB">;
3234 defm : scalar_math_f32_patterns<fmul, "MUL">;
3235 defm : scalar_math_f32_patterns<fdiv, "DIV">;
3237 multiclass scalar_math_f64_patterns<SDNode Op, string OpcPrefix> {
3238 let Predicates = [UseSSE2] in {
3239 // extracted scalar math op with insert via movsd
3240 def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
3241 (Op (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
3243 (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst,
3244 (COPY_TO_REGCLASS FR64:$src, VR128))>;
3246 // vector math op with insert via movsd
3247 def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
3248 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
3249 (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3252 // With SSE 4.1, blendi is preferred to movsd, so match those too.
3253 let Predicates = [UseSSE41] in {
3254 // extracted scalar math op with insert via blend
3255 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
3256 (Op (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
3257 FR64:$src))), (i8 1))),
3258 (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst,
3259 (COPY_TO_REGCLASS FR64:$src, VR128))>;
3261 // vector math op with insert via blend
3262 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
3263 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
3264 (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3267 // Repeat everything for AVX.
3268 let Predicates = [HasAVX] in {
3269 // extracted scalar math op with insert via movsd
3270 def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
3271 (Op (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
3273 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst,
3274 (COPY_TO_REGCLASS FR64:$src, VR128))>;
3276 // extracted scalar math op with insert via blend
3277 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
3278 (Op (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
3279 FR64:$src))), (i8 1))),
3280 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst,
3281 (COPY_TO_REGCLASS FR64:$src, VR128))>;
3283 // vector math op with insert via movsd
3284 def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
3285 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
3286 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3288 // vector math op with insert via blend
3289 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
3290 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
3291 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3295 defm : scalar_math_f64_patterns<fadd, "ADD">;
3296 defm : scalar_math_f64_patterns<fsub, "SUB">;
3297 defm : scalar_math_f64_patterns<fmul, "MUL">;
3298 defm : scalar_math_f64_patterns<fdiv, "DIV">;
/// Unary FP arithmetic (sqrt, rsqrt, rcp) comes in both scalar and vector
/// forms.
///
/// In addition, we also have a special variant of the scalar form here to
3303 /// represent the associated intrinsic operation. This form is unlike the
3304 /// plain scalar form, in that it takes an entire vector (instead of a
3305 /// scalar) and leaves the top elements undefined.
3307 /// And, we have a special variant form for a full-vector intrinsic form.
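// As a rough C-level sketch of the packed vs. scalar-intrinsic unops described
// above (assuming <immintrin.h>); only lane 0 of the scalar form carries the
// new result:
//
//   #include <immintrin.h>
//   __m128 roots(__m128 v) {
//     __m128 p = _mm_sqrt_ps(v);   // packed form: sqrt in all four lanes
//     __m128 s = _mm_sqrt_ss(v);   // scalar intrinsic form: sqrt in lane 0 only
//     return _mm_add_ps(p, s);
//   }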
3309 let Sched = WriteFSqrt in {
3310 def SSE_SQRTPS : OpndItins<
3311 IIC_SSE_SQRTPS_RR, IIC_SSE_SQRTPS_RM
3314 def SSE_SQRTSS : OpndItins<
3315 IIC_SSE_SQRTSS_RR, IIC_SSE_SQRTSS_RM
3318 def SSE_SQRTPD : OpndItins<
3319 IIC_SSE_SQRTPD_RR, IIC_SSE_SQRTPD_RM
3322 def SSE_SQRTSD : OpndItins<
3323 IIC_SSE_SQRTSD_RR, IIC_SSE_SQRTSD_RM
3327 let Sched = WriteFRsqrt in {
3328 def SSE_RSQRTPS : OpndItins<
3329 IIC_SSE_RSQRTPS_RR, IIC_SSE_RSQRTPS_RM
3332 def SSE_RSQRTSS : OpndItins<
3333 IIC_SSE_RSQRTSS_RR, IIC_SSE_RSQRTSS_RM
3337 let Sched = WriteFRcp in {
3338 def SSE_RCPP : OpndItins<
3339 IIC_SSE_RCPP_RR, IIC_SSE_RCPP_RM
3342 def SSE_RCPS : OpndItins<
3343 IIC_SSE_RCPS_RR, IIC_SSE_RCPS_RM
3347 /// sse1_fp_unop_s - SSE1 unops in scalar form
3348 /// For the non-AVX defs, we need $src1 to be tied to $dst because
3349 /// the HW instructions are 2 operand / destructive.
3350 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
3352 let Predicates = [HasAVX], hasSideEffects = 0 in {
3353 def V#NAME#SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst),
3354 (ins FR32:$src1, FR32:$src2),
3355 !strconcat("v", OpcodeStr,
3356 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3357 []>, VEX_4V, VEX_LIG, Sched<[itins.Sched]>;
3358 let mayLoad = 1 in {
3359 def V#NAME#SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
3360 (ins FR32:$src1,f32mem:$src2),
3361 !strconcat("v", OpcodeStr,
3362 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3363 []>, VEX_4V, VEX_LIG,
3364 Sched<[itins.Sched.Folded, ReadAfterLd]>;
3365 let isCodeGenOnly = 1 in
3366 def V#NAME#SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
3367 (ins VR128:$src1, ssmem:$src2),
3368 !strconcat("v", OpcodeStr,
3369 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3370 []>, VEX_4V, VEX_LIG,
3371 Sched<[itins.Sched.Folded, ReadAfterLd]>;
3375 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
3376 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
3377 [(set FR32:$dst, (OpNode FR32:$src))]>, Sched<[itins.Sched]>;
// For scalar unary operations, fold a load into the operation
// only in OptForSize mode. Folding eliminates an instruction, but it also
// eliminates the whole-register write that the separate load would perform,
// so it introduces a partial register update condition.
3382 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
3383 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
3384 [(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
3385 Requires<[UseSSE1, OptForSize]>, Sched<[itins.Sched.Folded]>;
3386 let isCodeGenOnly = 1, Constraints = "$src1 = $dst" in {
3387 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
3388 (ins VR128:$src1, VR128:$src2),
3389 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
3390 [], itins.rr>, Sched<[itins.Sched]>;
3391 let mayLoad = 1, hasSideEffects = 0 in
3392 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
3393 (ins VR128:$src1, ssmem:$src2),
3394 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
3395 [], itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
3399 /// sse1_fp_unop_p - SSE1 unops in packed form.
3400 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
3402 let Predicates = [HasAVX] in {
3403 def V#NAME#PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3404 !strconcat("v", OpcodeStr,
3405 "ps\t{$src, $dst|$dst, $src}"),
3406 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))],
3407 itins.rr>, VEX, Sched<[itins.Sched]>;
3408 def V#NAME#PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3409 !strconcat("v", OpcodeStr,
3410 "ps\t{$src, $dst|$dst, $src}"),
3411 [(set VR128:$dst, (OpNode (loadv4f32 addr:$src)))],
3412 itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
3413 def V#NAME#PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3414 !strconcat("v", OpcodeStr,
3415 "ps\t{$src, $dst|$dst, $src}"),
3416 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))],
3417 itins.rr>, VEX, VEX_L, Sched<[itins.Sched]>;
3418 def V#NAME#PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3419 !strconcat("v", OpcodeStr,
3420 "ps\t{$src, $dst|$dst, $src}"),
3421 [(set VR256:$dst, (OpNode (loadv8f32 addr:$src)))],
3422 itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
3425 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3426 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3427 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))], itins.rr>,
3428 Sched<[itins.Sched]>;
3429 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3430 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3431 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))], itins.rm>,
3432 Sched<[itins.Sched.Folded]>;
3435 /// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
3436 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
3437 Intrinsic V4F32Int, Intrinsic V8F32Int,
3439 let isCodeGenOnly = 1 in {
3440 let Predicates = [HasAVX] in {
3441 def V#NAME#PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3442 !strconcat("v", OpcodeStr,
3443 "ps\t{$src, $dst|$dst, $src}"),
3444 [(set VR128:$dst, (V4F32Int VR128:$src))],
3445 itins.rr>, VEX, Sched<[itins.Sched]>;
3446 def V#NAME#PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3447 !strconcat("v", OpcodeStr,
3448 "ps\t{$src, $dst|$dst, $src}"),
3449 [(set VR128:$dst, (V4F32Int (loadv4f32 addr:$src)))],
3450 itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
3451 def V#NAME#PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3452 !strconcat("v", OpcodeStr,
3453 "ps\t{$src, $dst|$dst, $src}"),
3454 [(set VR256:$dst, (V8F32Int VR256:$src))],
3455 itins.rr>, VEX, VEX_L, Sched<[itins.Sched]>;
3456 def V#NAME#PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst),
3458 !strconcat("v", OpcodeStr,
3459 "ps\t{$src, $dst|$dst, $src}"),
3460 [(set VR256:$dst, (V8F32Int (loadv8f32 addr:$src)))],
3461 itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
3464 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3465 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3466 [(set VR128:$dst, (V4F32Int VR128:$src))],
3467 itins.rr>, Sched<[itins.Sched]>;
3468 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3469 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3470 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))],
3471 itins.rm>, Sched<[itins.Sched.Folded]>;
3472 } // isCodeGenOnly = 1
3475 /// sse2_fp_unop_s - SSE2 unops in scalar form.
3476 // FIXME: Combine the following sse2 classes with the sse1 classes above.
3477 // The only usage of these is for SQRT[S/P]D. See sse12_fp_binop* for example.
3478 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
3479 SDNode OpNode, OpndItins itins> {
3480 let Predicates = [HasAVX], hasSideEffects = 0 in {
3481 def V#NAME#SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst),
3482 (ins FR64:$src1, FR64:$src2),
3483 !strconcat("v", OpcodeStr,
3484 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3485 []>, VEX_4V, VEX_LIG, Sched<[itins.Sched]>;
3486 let mayLoad = 1 in {
3487 def V#NAME#SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
3488 (ins FR64:$src1,f64mem:$src2),
3489 !strconcat("v", OpcodeStr,
3490 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3491 []>, VEX_4V, VEX_LIG,
3492 Sched<[itins.Sched.Folded, ReadAfterLd]>;
3493 let isCodeGenOnly = 1 in
3494 def V#NAME#SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
3495 (ins VR128:$src1, sdmem:$src2),
3496 !strconcat("v", OpcodeStr,
3497 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3498 []>, VEX_4V, VEX_LIG,
3499 Sched<[itins.Sched.Folded, ReadAfterLd]>;
3503 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
3504 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
3505 [(set FR64:$dst, (OpNode FR64:$src))], itins.rr>,
3506 Sched<[itins.Sched]>;
3507 // See the comments in sse1_fp_unop_s for why this is OptForSize.
3508 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
3509 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
3510 [(set FR64:$dst, (OpNode (load addr:$src)))], itins.rm>, XD,
3511 Requires<[UseSSE2, OptForSize]>, Sched<[itins.Sched.Folded]>;
3512 let isCodeGenOnly = 1, Constraints = "$src1 = $dst" in {
3514 SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3515 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
3516 [], itins.rr>, Sched<[itins.Sched]>;
3518 let mayLoad = 1, hasSideEffects = 0 in
3520 SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
3521 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
3522 [], itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
3523 } // isCodeGenOnly, Constraints
3526 /// sse2_fp_unop_p - SSE2 unops in vector forms.
3527 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
3528 SDNode OpNode, OpndItins itins> {
3529 let Predicates = [HasAVX] in {
3530 def V#NAME#PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3531 !strconcat("v", OpcodeStr,
3532 "pd\t{$src, $dst|$dst, $src}"),
3533 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))],
3534 itins.rr>, VEX, Sched<[itins.Sched]>;
3535 def V#NAME#PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3536 !strconcat("v", OpcodeStr,
3537 "pd\t{$src, $dst|$dst, $src}"),
3538 [(set VR128:$dst, (OpNode (loadv2f64 addr:$src)))],
3539 itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
3540 def V#NAME#PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3541 !strconcat("v", OpcodeStr,
3542 "pd\t{$src, $dst|$dst, $src}"),
3543 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))],
3544 itins.rr>, VEX, VEX_L, Sched<[itins.Sched]>;
3545 def V#NAME#PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3546 !strconcat("v", OpcodeStr,
3547 "pd\t{$src, $dst|$dst, $src}"),
3548 [(set VR256:$dst, (OpNode (loadv4f64 addr:$src)))],
3549 itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
3552 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3553 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3554 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))], itins.rr>,
3555 Sched<[itins.Sched]>;
3556 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3557 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3558 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))], itins.rm>,
3559 Sched<[itins.Sched.Folded]>;
3563 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, SSE_SQRTSS>,
3564 sse1_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPS>,
3565 sse2_fp_unop_s<0x51, "sqrt", fsqrt, SSE_SQRTSD>,
3566 sse2_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPD>;
3568 // Reciprocal approximations. Note that these typically require refinement
3569 // in order to obtain suitable precision.
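// As a sketch of such a refinement, one Newton-Raphson step for rsqrt is
// x1 = 0.5 * x0 * (3 - a * x0 * x0); the analogous step for rcp is
// x1 = x0 * (2 - a * x0). In C with <immintrin.h> (an illustration, not the
// lowering performed here):
//
//   #include <immintrin.h>
//   __m128 rsqrt_refined(__m128 a) {
//     __m128 x0 = _mm_rsqrt_ps(a);                 // roughly 12-bit estimate
//     __m128 ax0x0 = _mm_mul_ps(a, _mm_mul_ps(x0, x0));
//     return _mm_mul_ps(_mm_mul_ps(_mm_set1_ps(0.5f), x0),
//                       _mm_sub_ps(_mm_set1_ps(3.0f), ax0x0));
//   }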
3570 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, SSE_RSQRTSS>,
3571 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SSE_RSQRTPS>,
3572 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps,
3573 int_x86_avx_rsqrt_ps_256, SSE_RSQRTPS>;
3574 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, SSE_RCPS>,
3575 sse1_fp_unop_p<0x53, "rcp", X86frcp, SSE_RCPP>,
3576 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps,
3577 int_x86_avx_rcp_ps_256, SSE_RCPP>;
3579 let Predicates = [UseAVX] in {
3580 def : Pat<(f32 (fsqrt FR32:$src)),
3581 (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
3582 def : Pat<(f32 (fsqrt (load addr:$src))),
3583 (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
3584 Requires<[HasAVX, OptForSize]>;
3585 def : Pat<(f64 (fsqrt FR64:$src)),
3586 (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
3587 def : Pat<(f64 (fsqrt (load addr:$src))),
3588 (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
3589 Requires<[HasAVX, OptForSize]>;
3591 def : Pat<(f32 (X86frsqrt FR32:$src)),
3592 (VRSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
3593 def : Pat<(f32 (X86frsqrt (load addr:$src))),
3594 (VRSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
3595 Requires<[HasAVX, OptForSize]>;
3597 def : Pat<(f32 (X86frcp FR32:$src)),
3598 (VRCPSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
3599 def : Pat<(f32 (X86frcp (load addr:$src))),
3600 (VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
3601 Requires<[HasAVX, OptForSize]>;
3603 let Predicates = [UseAVX] in {
3604 def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
3605 (COPY_TO_REGCLASS (VSQRTSSr (f32 (IMPLICIT_DEF)),
3606 (COPY_TO_REGCLASS VR128:$src, FR32)),
3608 def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
3609 (VSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
3611 def : Pat<(int_x86_sse2_sqrt_sd VR128:$src),
3612 (COPY_TO_REGCLASS (VSQRTSDr (f64 (IMPLICIT_DEF)),
3613 (COPY_TO_REGCLASS VR128:$src, FR64)),
3615 def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
3616 (VSQRTSDm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
3619 let Predicates = [HasAVX] in {
3620 def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
3621 (COPY_TO_REGCLASS (VRSQRTSSr (f32 (IMPLICIT_DEF)),
3622 (COPY_TO_REGCLASS VR128:$src, FR32)),
3624 def : Pat<(int_x86_sse_rsqrt_ss sse_load_f32:$src),
3625 (VRSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
3627 def : Pat<(int_x86_sse_rcp_ss VR128:$src),
3628 (COPY_TO_REGCLASS (VRCPSSr (f32 (IMPLICIT_DEF)),
3629 (COPY_TO_REGCLASS VR128:$src, FR32)),
3631 def : Pat<(int_x86_sse_rcp_ss sse_load_f32:$src),
3632 (VRCPSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
3635 // These are unary operations, but they are modeled as having 2 source operands
3636 // because the high elements of the destination are unchanged in SSE.
3637 let Predicates = [UseSSE1] in {
3638 def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
3639 (RSQRTSSr_Int VR128:$src, VR128:$src)>;
3640 def : Pat<(int_x86_sse_rcp_ss VR128:$src),
3641 (RCPSSr_Int VR128:$src, VR128:$src)>;
3642 def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
3643 (SQRTSSr_Int VR128:$src, VR128:$src)>;
3644 def : Pat<(int_x86_sse2_sqrt_sd VR128:$src),
3645 (SQRTSDr_Int VR128:$src, VR128:$src)>;
3648 // There is no f64 version of the reciprocal approximation instructions.
3650 //===----------------------------------------------------------------------===//
3651 // SSE 1 & 2 - Non-temporal stores
3652 //===----------------------------------------------------------------------===//
3654 let AddedComplexity = 400 in { // Prefer non-temporal versions
3655 let SchedRW = [WriteStore] in {
3656 let Predicates = [HasAVX, NoVLX] in {
3657 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
3658 (ins f128mem:$dst, VR128:$src),
3659 "movntps\t{$src, $dst|$dst, $src}",
3660 [(alignednontemporalstore (v4f32 VR128:$src),
3662 IIC_SSE_MOVNT>, VEX;
3663 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
3664 (ins f128mem:$dst, VR128:$src),
3665 "movntpd\t{$src, $dst|$dst, $src}",
3666 [(alignednontemporalstore (v2f64 VR128:$src),
3668 IIC_SSE_MOVNT>, VEX;
3670 let ExeDomain = SSEPackedInt in
3671 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
3672 (ins f128mem:$dst, VR128:$src),
3673 "movntdq\t{$src, $dst|$dst, $src}",
3674 [(alignednontemporalstore (v2i64 VR128:$src),
3676 IIC_SSE_MOVNT>, VEX;
3678 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
3679 (ins f256mem:$dst, VR256:$src),
3680 "movntps\t{$src, $dst|$dst, $src}",
3681 [(alignednontemporalstore (v8f32 VR256:$src),
3683 IIC_SSE_MOVNT>, VEX, VEX_L;
3684 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
3685 (ins f256mem:$dst, VR256:$src),
3686 "movntpd\t{$src, $dst|$dst, $src}",
3687 [(alignednontemporalstore (v4f64 VR256:$src),
3689 IIC_SSE_MOVNT>, VEX, VEX_L;
3690 let ExeDomain = SSEPackedInt in
3691 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
3692 (ins f256mem:$dst, VR256:$src),
3693 "movntdq\t{$src, $dst|$dst, $src}",
3694 [(alignednontemporalstore (v4i64 VR256:$src),
3696 IIC_SSE_MOVNT>, VEX, VEX_L;
3699 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3700 "movntps\t{$src, $dst|$dst, $src}",
3701 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)],
3703 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3704 "movntpd\t{$src, $dst|$dst, $src}",
3705 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)],
3708 let ExeDomain = SSEPackedInt in
3709 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3710 "movntdq\t{$src, $dst|$dst, $src}",
3711 [(alignednontemporalstore (v2i64 VR128:$src), addr:$dst)],
3714 // There is no AVX form for instructions below this point
3715 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
3716 "movnti{l}\t{$src, $dst|$dst, $src}",
3717 [(nontemporalstore (i32 GR32:$src), addr:$dst)],
3719 PS, Requires<[HasSSE2]>;
3720 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
3721 "movnti{q}\t{$src, $dst|$dst, $src}",
3722 [(nontemporalstore (i64 GR64:$src), addr:$dst)],
3724 PS, Requires<[HasSSE2]>;
3725 } // SchedRW = [WriteStore]
3727 let Predicates = [HasAVX, NoVLX] in {
3728 def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
3729 (VMOVNTPSmr addr:$dst, VR128:$src)>;
3732 def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
3733 (MOVNTPSmr addr:$dst, VR128:$src)>;
3735 } // AddedComplexity
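// A minimal C sketch of how these non-temporal stores are reached from the
// intrinsics (assuming <immintrin.h>); note the patterns above require an
// aligned address (alignednontemporalstore), matching movntps' 16-byte
// alignment requirement.
//
//   #include <immintrin.h>
//   void stream_out(float *dst /* 16-byte aligned */, __m128 v) {
//     _mm_stream_ps(dst, v);   // movntps (vmovntps with AVX)
//   }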
3737 //===----------------------------------------------------------------------===//
3738 // SSE 1 & 2 - Prefetch and memory fence
3739 //===----------------------------------------------------------------------===//
3741 // Prefetch intrinsic.
3742 let Predicates = [HasSSE1], SchedRW = [WriteLoad] in {
3743 def PREFETCHT0 : I<0x18, MRM1m, (outs), (ins i8mem:$src),
3744 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))],
3745 IIC_SSE_PREFETCH>, TB;
3746 def PREFETCHT1 : I<0x18, MRM2m, (outs), (ins i8mem:$src),
3747 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))],
3748 IIC_SSE_PREFETCH>, TB;
3749 def PREFETCHT2 : I<0x18, MRM3m, (outs), (ins i8mem:$src),
3750 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))],
3751 IIC_SSE_PREFETCH>, TB;
3752 def PREFETCHNTA : I<0x18, MRM0m, (outs), (ins i8mem:$src),
3753 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))],
3754 IIC_SSE_PREFETCH>, TB;
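// A short C sketch of how these hints are typically reached from source
// (assuming the GCC/Clang __builtin_prefetch(addr, rw, locality) builtin);
// the locality argument maps to the patterns above: 3 -> prefetcht0,
// 2 -> prefetcht1, 1 -> prefetcht2, 0 -> prefetchnta.
//
//   void warm(const char *p) {
//     __builtin_prefetch(p, 0, 3);   // prefetcht0
//     __builtin_prefetch(p, 0, 0);   // prefetchnta
//   }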
// FIXME: How should the flush instruction be modeled?
3758 let SchedRW = [WriteLoad] in {
3760 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3761 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)],
3762 IIC_SSE_PREFETCH>, PS, Requires<[HasSSE2]>;
3765 let SchedRW = [WriteNop] in {
3766 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3767 // was introduced with SSE2, it's backward compatible.
3768 def PAUSE : I<0x90, RawFrm, (outs), (ins),
3769 "pause", [(int_x86_sse2_pause)], IIC_SSE_PAUSE>,
3770 OBXS, Requires<[HasSSE2]>;
3773 let SchedRW = [WriteFence] in {
3774 // Load, store, and memory fence
3775 def SFENCE : I<0xAE, MRM_F8, (outs), (ins),
3776 "sfence", [(int_x86_sse_sfence)], IIC_SSE_SFENCE>,
3777 PS, Requires<[HasSSE1]>;
3778 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3779 "lfence", [(int_x86_sse2_lfence)], IIC_SSE_LFENCE>,
3780 TB, Requires<[HasSSE2]>;
3781 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3782 "mfence", [(int_x86_sse2_mfence)], IIC_SSE_MFENCE>,
3783 TB, Requires<[HasSSE2]>;
3786 def : Pat<(X86SFence), (SFENCE)>;
3787 def : Pat<(X86LFence), (LFENCE)>;
3788 def : Pat<(X86MFence), (MFENCE)>;
3790 //===----------------------------------------------------------------------===//
3791 // SSE 1 & 2 - Load/Store XCSR register
3792 //===----------------------------------------------------------------------===//
3794 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
3795 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
3796 IIC_SSE_LDMXCSR>, VEX, Sched<[WriteLoad]>;
3797 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3798 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
3799 IIC_SSE_STMXCSR>, VEX, Sched<[WriteStore]>;
3801 let Predicates = [UseSSE1] in {
3802 def LDMXCSR : I<0xAE, MRM2m, (outs), (ins i32mem:$src),
3803 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
3804 IIC_SSE_LDMXCSR>, TB, Sched<[WriteLoad]>;
3805 def STMXCSR : I<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3806 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
3807 IIC_SSE_STMXCSR>, TB, Sched<[WriteStore]>;
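// A minimal C sketch of the MXCSR access these instructions provide (assuming
// <immintrin.h>; 0x8000 is the flush-to-zero bit):
//
//   #include <immintrin.h>
//   void enable_ftz(void) {
//     unsigned csr = _mm_getcsr();   // stmxcsr
//     _mm_setcsr(csr | 0x8000);      // ldmxcsr
//   }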
3810 //===---------------------------------------------------------------------===//
3811 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
3812 //===---------------------------------------------------------------------===//
3814 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3816 let hasSideEffects = 0, SchedRW = [WriteMove] in {
3817 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3818 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
3820 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3821 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
3823 def VMOVDQUrr : VSSI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3824 "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
3826 def VMOVDQUYrr : VSSI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3827 "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
3832 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
3833 SchedRW = [WriteMove] in {
3834 def VMOVDQArr_REV : VPDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3835 "movdqa\t{$src, $dst|$dst, $src}", [],
3838 def VMOVDQAYrr_REV : VPDI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
3839 "movdqa\t{$src, $dst|$dst, $src}", [],
3840 IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
3841 def VMOVDQUrr_REV : VSSI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3842 "movdqu\t{$src, $dst|$dst, $src}", [],
3845 def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
3846 "movdqu\t{$src, $dst|$dst, $src}", [],
3847 IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
3850 let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
3851 hasSideEffects = 0, SchedRW = [WriteLoad] in {
3852 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3853 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
3855 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3856 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
3858 let Predicates = [HasAVX] in {
3859 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3860 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
3862 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3863 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
3868 let mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
3869 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
3870 (ins i128mem:$dst, VR128:$src),
3871 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
3873 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
3874 (ins i256mem:$dst, VR256:$src),
3875 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
3877 let Predicates = [HasAVX] in {
3878 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3879 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
3881 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
3882 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
3887 let SchedRW = [WriteMove] in {
3888 let hasSideEffects = 0 in
3889 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3890 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>;
3892 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3893 "movdqu\t{$src, $dst|$dst, $src}",
3894 [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;
3897 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
3898 def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3899 "movdqa\t{$src, $dst|$dst, $src}", [],
3902 def MOVDQUrr_REV : I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3903 "movdqu\t{$src, $dst|$dst, $src}",
3904 [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;
3908 let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
3909 hasSideEffects = 0, SchedRW = [WriteLoad] in {
3910 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3911 "movdqa\t{$src, $dst|$dst, $src}",
3912 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/],
3914 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3915 "movdqu\t{$src, $dst|$dst, $src}",
3916 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/],
3918 XS, Requires<[UseSSE2]>;
3921 let mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
3922 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3923 "movdqa\t{$src, $dst|$dst, $src}",
3924 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/],
3926 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3927 "movdqu\t{$src, $dst|$dst, $src}",
3928 [/*(store (v2i64 VR128:$src), addr:$dst)*/],
3930 XS, Requires<[UseSSE2]>;
3933 } // ExeDomain = SSEPackedInt
3935 let Predicates = [HasAVX] in {
3936 def : Pat<(int_x86_sse2_storeu_dq addr:$dst, VR128:$src),
3937 (VMOVDQUmr addr:$dst, VR128:$src)>;
3938 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
3939 (VMOVDQUYmr addr:$dst, VR256:$src)>;
3941 let Predicates = [UseSSE2] in
3942 def : Pat<(int_x86_sse2_storeu_dq addr:$dst, VR128:$src),
3943 (MOVDQUmr addr:$dst, VR128:$src)>;
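// A short C sketch of the aligned vs. unaligned integer moves (assuming
// <immintrin.h>):
//
//   #include <immintrin.h>
//   __m128i combine(const __m128i *aligned16, const void *unaligned) {
//     __m128i a = _mm_load_si128(aligned16);                    // movdqa
//     __m128i u = _mm_loadu_si128((const __m128i *)unaligned);  // movdqu
//     return _mm_add_epi32(a, u);
//   }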
3945 //===---------------------------------------------------------------------===//
3946 // SSE2 - Packed Integer Arithmetic Instructions
3947 //===---------------------------------------------------------------------===//
3949 let Sched = WriteVecIMul in
3950 def SSE_PMADD : OpndItins<
3951 IIC_SSE_PMADD, IIC_SSE_PMADD
3954 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3956 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
3957 RegisterClass RC, PatFrag memop_frag,
3958 X86MemOperand x86memop,
3960 bit IsCommutable = 0,
3962 let isCommutable = IsCommutable in
3963 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
3964 (ins RC:$src1, RC:$src2),
3966 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3967 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3968 [(set RC:$dst, (IntId RC:$src1, RC:$src2))], itins.rr>,
3969 Sched<[itins.Sched]>;
3970 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
3971 (ins RC:$src1, x86memop:$src2),
3973 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3974 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3975 [(set RC:$dst, (IntId RC:$src1, (bitconvert (memop_frag addr:$src2))))],
3976 itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
3979 multiclass PDI_binop_all_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
3980 Intrinsic IntId256, OpndItins itins,
3981 bit IsCommutable = 0> {
3982 let Predicates = [HasAVX] in
3983 defm V#NAME : PDI_binop_rm_int<opc, !strconcat("v", OpcodeStr), IntId128,
3984 VR128, loadv2i64, i128mem, itins,
3985 IsCommutable, 0>, VEX_4V;
3987 let Constraints = "$src1 = $dst" in
3988 defm NAME : PDI_binop_rm_int<opc, OpcodeStr, IntId128, VR128, memopv2i64,
3989 i128mem, itins, IsCommutable, 1>;
3991 let Predicates = [HasAVX2] in
3992 defm V#NAME#Y : PDI_binop_rm_int<opc, !strconcat("v", OpcodeStr), IntId256,
3993 VR256, loadv4i64, i256mem, itins,
3994 IsCommutable, 0>, VEX_4V, VEX_L;
3997 multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
3998 string OpcodeStr, SDNode OpNode,
3999 SDNode OpNode2, RegisterClass RC,
4000 ValueType DstVT, ValueType SrcVT, PatFrag bc_frag,
4001 PatFrag ld_frag, ShiftOpndItins itins,
4003 // src2 is always 128-bit
4004 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
4005 (ins RC:$src1, VR128:$src2),
4007 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4008 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4009 [(set RC:$dst, (DstVT (OpNode RC:$src1, (SrcVT VR128:$src2))))],
4010 itins.rr>, Sched<[WriteVecShift]>;
4011 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
4012 (ins RC:$src1, i128mem:$src2),
4014 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4015 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4016 [(set RC:$dst, (DstVT (OpNode RC:$src1,
4017 (bc_frag (ld_frag addr:$src2)))))], itins.rm>,
4018 Sched<[WriteVecShiftLd, ReadAfterLd]>;
4019 def ri : PDIi8<opc2, ImmForm, (outs RC:$dst),
4020 (ins RC:$src1, u8imm:$src2),
4022 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4023 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4024 [(set RC:$dst, (DstVT (OpNode2 RC:$src1, (i8 imm:$src2))))], itins.ri>,
4025 Sched<[WriteVecShift]>;
4028 /// PDI_binop_rm2 - Simple SSE2 binary operator with different src and dst types
4029 multiclass PDI_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
4030 ValueType DstVT, ValueType SrcVT, RegisterClass RC,
4031 PatFrag memop_frag, X86MemOperand x86memop,
4033 bit IsCommutable = 0, bit Is2Addr = 1> {
4034 let isCommutable = IsCommutable in
4035 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
4036 (ins RC:$src1, RC:$src2),
4038 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4039 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4040 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>,
4041 Sched<[itins.Sched]>;
4042 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
4043 (ins RC:$src1, x86memop:$src2),
4045 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4046 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4047 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
4048 (bitconvert (memop_frag addr:$src2)))))]>,
4049 Sched<[itins.Sched.Folded, ReadAfterLd]>;
4051 } // ExeDomain = SSEPackedInt
4053 defm PADDB : PDI_binop_all<0xFC, "paddb", add, v16i8, v32i8,
4054 SSE_INTALU_ITINS_P, 1>;
4055 defm PADDW : PDI_binop_all<0xFD, "paddw", add, v8i16, v16i16,
4056 SSE_INTALU_ITINS_P, 1>;
4057 defm PADDD : PDI_binop_all<0xFE, "paddd", add, v4i32, v8i32,
4058 SSE_INTALU_ITINS_P, 1>;
4059 defm PADDQ : PDI_binop_all<0xD4, "paddq", add, v2i64, v4i64,
4060 SSE_INTALUQ_ITINS_P, 1>;
4061 defm PMULLW : PDI_binop_all<0xD5, "pmullw", mul, v8i16, v16i16,
4062 SSE_INTMUL_ITINS_P, 1>;
4063 defm PMULHUW : PDI_binop_all<0xE4, "pmulhuw", mulhu, v8i16, v16i16,
4064 SSE_INTMUL_ITINS_P, 1>;
4065 defm PMULHW : PDI_binop_all<0xE5, "pmulhw", mulhs, v8i16, v16i16,
4066 SSE_INTMUL_ITINS_P, 1>;
4067 defm PSUBB : PDI_binop_all<0xF8, "psubb", sub, v16i8, v32i8,
4068 SSE_INTALU_ITINS_P, 0>;
4069 defm PSUBW : PDI_binop_all<0xF9, "psubw", sub, v8i16, v16i16,
4070 SSE_INTALU_ITINS_P, 0>;
4071 defm PSUBD : PDI_binop_all<0xFA, "psubd", sub, v4i32, v8i32,
4072 SSE_INTALU_ITINS_P, 0>;
4073 defm PSUBQ : PDI_binop_all<0xFB, "psubq", sub, v2i64, v4i64,
4074 SSE_INTALUQ_ITINS_P, 0>;
4075 defm PSUBUSB : PDI_binop_all<0xD8, "psubusb", X86subus, v16i8, v32i8,
4076 SSE_INTALU_ITINS_P, 0>;
4077 defm PSUBUSW : PDI_binop_all<0xD9, "psubusw", X86subus, v8i16, v16i16,
4078 SSE_INTALU_ITINS_P, 0>;
4079 defm PMINUB : PDI_binop_all<0xDA, "pminub", X86umin, v16i8, v32i8,
4080 SSE_INTALU_ITINS_P, 1>;
4081 defm PMINSW : PDI_binop_all<0xEA, "pminsw", X86smin, v8i16, v16i16,
4082 SSE_INTALU_ITINS_P, 1>;
4083 defm PMAXUB : PDI_binop_all<0xDE, "pmaxub", X86umax, v16i8, v32i8,
4084 SSE_INTALU_ITINS_P, 1>;
4085 defm PMAXSW : PDI_binop_all<0xEE, "pmaxsw", X86smax, v8i16, v16i16,
4086 SSE_INTALU_ITINS_P, 1>;
4089 defm PSUBSB : PDI_binop_all_int<0xE8, "psubsb", int_x86_sse2_psubs_b,
4090 int_x86_avx2_psubs_b, SSE_INTALU_ITINS_P, 0>;
4091 defm PSUBSW : PDI_binop_all_int<0xE9, "psubsw" , int_x86_sse2_psubs_w,
4092 int_x86_avx2_psubs_w, SSE_INTALU_ITINS_P, 0>;
4093 defm PADDSB : PDI_binop_all_int<0xEC, "paddsb" , int_x86_sse2_padds_b,
4094 int_x86_avx2_padds_b, SSE_INTALU_ITINS_P, 1>;
4095 defm PADDSW : PDI_binop_all_int<0xED, "paddsw" , int_x86_sse2_padds_w,
4096 int_x86_avx2_padds_w, SSE_INTALU_ITINS_P, 1>;
4097 defm PADDUSB : PDI_binop_all_int<0xDC, "paddusb", int_x86_sse2_paddus_b,
4098 int_x86_avx2_paddus_b, SSE_INTALU_ITINS_P, 1>;
4099 defm PADDUSW : PDI_binop_all_int<0xDD, "paddusw", int_x86_sse2_paddus_w,
4100 int_x86_avx2_paddus_w, SSE_INTALU_ITINS_P, 1>;
4101 defm PMADDWD : PDI_binop_all_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd,
4102 int_x86_avx2_pmadd_wd, SSE_PMADD, 1>;
4103 defm PAVGB : PDI_binop_all_int<0xE0, "pavgb", int_x86_sse2_pavg_b,
4104 int_x86_avx2_pavg_b, SSE_INTALU_ITINS_P, 1>;
4105 defm PAVGW : PDI_binop_all_int<0xE3, "pavgw", int_x86_sse2_pavg_w,
4106 int_x86_avx2_pavg_w, SSE_INTALU_ITINS_P, 1>;
4107 defm PSADBW : PDI_binop_all_int<0xF6, "psadbw", int_x86_sse2_psad_bw,
4108 int_x86_avx2_psad_bw, SSE_PMADD, 1>;
4110 let Predicates = [HasAVX] in
4111 defm VPMULUDQ : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v2i64, v4i32, VR128,
4112 loadv2i64, i128mem, SSE_INTMUL_ITINS_P, 1, 0>,
4114 let Predicates = [HasAVX2] in
4115 defm VPMULUDQY : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v4i64, v8i32,
4116 VR256, loadv4i64, i256mem,
4117 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
4118 let Constraints = "$src1 = $dst" in
4119 defm PMULUDQ : PDI_binop_rm2<0xF4, "pmuludq", X86pmuludq, v2i64, v4i32, VR128,
4120 memopv2i64, i128mem, SSE_INTMUL_ITINS_P, 1>;
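// PMULUDQ is the canonical example of PDI_binop_rm2's mixed types: it reads
// the low (even) 32-bit element of each 64-bit lane and produces full 64-bit
// products. A short C sketch (assuming <immintrin.h>):
//
//   #include <immintrin.h>
//   __m128i widening_mul(__m128i a, __m128i b) {
//     return _mm_mul_epu32(a, b);   // result.u64[k] = a.u32[2k] * b.u32[2k]
//   }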
4122 //===---------------------------------------------------------------------===//
4123 // SSE2 - Packed Integer Logical Instructions
4124 //===---------------------------------------------------------------------===//
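// Each shift below comes in a by-immediate form (ri) and a form whose count is
// taken from the low 64 bits of an XMM register (rr/rm; src2 is always
// 128-bit). A short C sketch (assuming <immintrin.h>):
//
//   #include <immintrin.h>
//   __m128i shifts(__m128i v, __m128i cnt) {
//     __m128i by_imm = _mm_slli_epi32(v, 3);    // pslld $3, %xmm0
//     __m128i by_reg = _mm_sll_epi32(v, cnt);   // pslld %xmm1, %xmm0
//     return _mm_or_si128(by_imm, by_reg);
//   }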
4126 let Predicates = [HasAVX] in {
4127 defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
4128 VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
4129 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4130 defm VPSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
4131 VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
4132 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4133 defm VPSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
4134 VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
4135 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4137 defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
4138 VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
4139 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4140 defm VPSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
4141 VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
4142 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4143 defm VPSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
4144 VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
4145 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4147 defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
4148 VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
4149 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4150 defm VPSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
4151 VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
4152 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4154 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift] in {
4155 // 128-bit logical shifts.
4156 def VPSLLDQri : PDIi8<0x73, MRM7r,
4157 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
4158 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4160 (v2i64 (X86vshldq VR128:$src1, (i8 imm:$src2))))]>,
4162 def VPSRLDQri : PDIi8<0x73, MRM3r,
4163 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
4164 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4166 (v2i64 (X86vshrdq VR128:$src1, (i8 imm:$src2))))]>,
4168 // PSRADQri doesn't exist in SSE[1-3].
4170 } // Predicates = [HasAVX]
4172 let Predicates = [HasAVX2] in {
4173 defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
4174 VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
4175 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4176 defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
4177 VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
4178 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4179 defm VPSLLQY : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
4180 VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
4181 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4183 defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
4184 VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
4185 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4186 defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
4187 VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
4188 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4189 defm VPSRLQY : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
4190 VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
4191 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4193 defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
4194 VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
4195 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4196 defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
4197 VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
4198 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4200 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 in {
4201 // 256-bit logical shifts.
4202 def VPSLLDQYri : PDIi8<0x73, MRM7r,
4203 (outs VR256:$dst), (ins VR256:$src1, u8imm:$src2),
4204 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4206 (v4i64 (X86vshldq VR256:$src1, (i8 imm:$src2))))]>,
4208 def VPSRLDQYri : PDIi8<0x73, MRM3r,
4209 (outs VR256:$dst), (ins VR256:$src1, u8imm:$src2),
4210 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4212 (v4i64 (X86vshrdq VR256:$src1, (i8 imm:$src2))))]>,
4214 // PSRADQYri doesn't exist in SSE[1-3].
4216 } // Predicates = [HasAVX2]
4218 let Constraints = "$src1 = $dst" in {
4219 defm PSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "psllw", X86vshl, X86vshli,
4220 VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
4221 SSE_INTSHIFT_ITINS_P>;
4222 defm PSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "pslld", X86vshl, X86vshli,
4223 VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
4224 SSE_INTSHIFT_ITINS_P>;
4225 defm PSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "psllq", X86vshl, X86vshli,
4226 VR128, v2i64, v2i64, bc_v2i64, memopv2i64,
4227 SSE_INTSHIFT_ITINS_P>;
4229 defm PSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "psrlw", X86vsrl, X86vsrli,
4230 VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
4231 SSE_INTSHIFT_ITINS_P>;
4232 defm PSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "psrld", X86vsrl, X86vsrli,
4233 VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
4234 SSE_INTSHIFT_ITINS_P>;
4235 defm PSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "psrlq", X86vsrl, X86vsrli,
4236 VR128, v2i64, v2i64, bc_v2i64, memopv2i64,
4237 SSE_INTSHIFT_ITINS_P>;
4239 defm PSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "psraw", X86vsra, X86vsrai,
4240 VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
4241 SSE_INTSHIFT_ITINS_P>;
4242 defm PSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "psrad", X86vsra, X86vsrai,
4243 VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
4244 SSE_INTSHIFT_ITINS_P>;
4246 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 in {
4247 // 128-bit logical shifts.
4248 def PSLLDQri : PDIi8<0x73, MRM7r,
4249 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
4250 "pslldq\t{$src2, $dst|$dst, $src2}",
4252 (v2i64 (X86vshldq VR128:$src1, (i8 imm:$src2))))],
4253 IIC_SSE_INTSHDQ_P_RI>;
4254 def PSRLDQri : PDIi8<0x73, MRM3r,
4255 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
4256 "psrldq\t{$src2, $dst|$dst, $src2}",
4258 (v2i64 (X86vshrdq VR128:$src1, (i8 imm:$src2))))],
4259 IIC_SSE_INTSHDQ_P_RI>;
4260 // PSRADQri doesn't exist in SSE[1-3].
4262 } // Constraints = "$src1 = $dst"
4264 let Predicates = [HasAVX] in {
4265 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
4266 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4269 let Predicates = [UseSSE2] in {
4270 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
4271 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4274 //===---------------------------------------------------------------------===//
4275 // SSE2 - Packed Integer Comparison Instructions
4276 //===---------------------------------------------------------------------===//
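// Packed compares produce element-wise masks rather than flags: each result
// element is all ones when the predicate holds and all zeros otherwise. For
// example, pcmpeqd on {1,2,3,4} and {1,0,3,0} yields {~0,0,~0,0}. pcmpgt* is
// a signed greater-than comparison; there is no packed unsigned compare here.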
4278 defm PCMPEQB : PDI_binop_all<0x74, "pcmpeqb", X86pcmpeq, v16i8, v32i8,
4279 SSE_INTALU_ITINS_P, 1>;
4280 defm PCMPEQW : PDI_binop_all<0x75, "pcmpeqw", X86pcmpeq, v8i16, v16i16,
4281 SSE_INTALU_ITINS_P, 1>;
4282 defm PCMPEQD : PDI_binop_all<0x76, "pcmpeqd", X86pcmpeq, v4i32, v8i32,
4283 SSE_INTALU_ITINS_P, 1>;
4284 defm PCMPGTB : PDI_binop_all<0x64, "pcmpgtb", X86pcmpgt, v16i8, v32i8,
4285 SSE_INTALU_ITINS_P, 0>;
4286 defm PCMPGTW : PDI_binop_all<0x65, "pcmpgtw", X86pcmpgt, v8i16, v16i16,
4287 SSE_INTALU_ITINS_P, 0>;
4288 defm PCMPGTD : PDI_binop_all<0x66, "pcmpgtd", X86pcmpgt, v4i32, v8i32,
4289 SSE_INTALU_ITINS_P, 0>;
4291 //===---------------------------------------------------------------------===//
4292 // SSE2 - Packed Integer Shuffle Instructions
4293 //===---------------------------------------------------------------------===//
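// The pshuf* immediate packs four 2-bit source indices: bits [1:0] select the
// source element written to result element 0, bits [3:2] element 1, and so
// on. For example, "pshufd $0x1B, %xmm1, %xmm0" reverses the four dwords of
// xmm1. pshufhw/pshuflw apply the same encoding to only the high or low four
// words and copy the other half of the register through unchanged.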
4295 let ExeDomain = SSEPackedInt in {
4296 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt128, ValueType vt256,
4298 let Predicates = [HasAVX] in {
4299 def V#NAME#ri : Ii8<0x70, MRMSrcReg, (outs VR128:$dst),
4300 (ins VR128:$src1, u8imm:$src2),
4301 !strconcat("v", OpcodeStr,
4302 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4304 (vt128 (OpNode VR128:$src1, (i8 imm:$src2))))],
4305 IIC_SSE_PSHUF_RI>, VEX, Sched<[WriteShuffle]>;
4306 def V#NAME#mi : Ii8<0x70, MRMSrcMem, (outs VR128:$dst),
4307 (ins i128mem:$src1, u8imm:$src2),
4308 !strconcat("v", OpcodeStr,
4309 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4311 (vt128 (OpNode (bitconvert (loadv2i64 addr:$src1)),
4312 (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>, VEX,
4313 Sched<[WriteShuffleLd]>;
4316 let Predicates = [HasAVX2] in {
4317 def V#NAME#Yri : Ii8<0x70, MRMSrcReg, (outs VR256:$dst),
4318 (ins VR256:$src1, u8imm:$src2),
4319 !strconcat("v", OpcodeStr,
4320 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4322 (vt256 (OpNode VR256:$src1, (i8 imm:$src2))))],
4323 IIC_SSE_PSHUF_RI>, VEX, VEX_L, Sched<[WriteShuffle]>;
4324 def V#NAME#Ymi : Ii8<0x70, MRMSrcMem, (outs VR256:$dst),
4325 (ins i256mem:$src1, u8imm:$src2),
4326 !strconcat("v", OpcodeStr,
4327 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4329 (vt256 (OpNode (bitconvert (loadv4i64 addr:$src1)),
4330 (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>, VEX, VEX_L,
4331 Sched<[WriteShuffleLd]>;
4334 let Predicates = [UseSSE2] in {
4335 def ri : Ii8<0x70, MRMSrcReg,
4336 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
4337 !strconcat(OpcodeStr,
4338 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4340 (vt128 (OpNode VR128:$src1, (i8 imm:$src2))))],
4341 IIC_SSE_PSHUF_RI>, Sched<[WriteShuffle]>;
4342 def mi : Ii8<0x70, MRMSrcMem,
4343 (outs VR128:$dst), (ins i128mem:$src1, u8imm:$src2),
4344 !strconcat(OpcodeStr,
4345 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4347 (vt128 (OpNode (bitconvert (memopv2i64 addr:$src1)),
4348 (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>,
4349 Sched<[WriteShuffleLd, ReadAfterLd]>;
4352 } // ExeDomain = SSEPackedInt
4354 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, v8i32, X86PShufd>, PD;
4355 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, v16i16, X86PShufhw>, XS;
4356 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, v16i16, X86PShuflw>, XD;
4358 let Predicates = [HasAVX] in {
4359 def : Pat<(v4f32 (X86PShufd (loadv4f32 addr:$src1), (i8 imm:$imm))),
4360 (VPSHUFDmi addr:$src1, imm:$imm)>;
4361 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
4362 (VPSHUFDri VR128:$src1, imm:$imm)>;
4365 let Predicates = [UseSSE2] in {
4366 def : Pat<(v4f32 (X86PShufd (memopv4f32 addr:$src1), (i8 imm:$imm))),
4367 (PSHUFDmi addr:$src1, imm:$imm)>;
4368 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
4369 (PSHUFDri VR128:$src1, imm:$imm)>;
4372 //===---------------------------------------------------------------------===//
4373 // Packed Integer Pack Instructions (SSE & AVX)
4374 //===---------------------------------------------------------------------===//
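// The pack instructions narrow two source vectors into one result with
// saturation: packsswb/packssdw saturate to the signed range, while
// packuswb/packusdw saturate to the unsigned range (packusdw is the SSE4.1
// form handled by sse4_pack below). E.g. packsswb converts sixteen words from
// the two sources into sixteen bytes, clamping values outside [-128, 127].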
4376 let ExeDomain = SSEPackedInt in {
4377 multiclass sse2_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
4378 ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
4379 PatFrag ld_frag, bit Is2Addr = 1> {
4380 def rr : PDI<opc, MRMSrcReg,
4381 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
4383 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4384 !strconcat(OpcodeStr,
4385 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4387 (OutVT (OpNode (ArgVT VR128:$src1), VR128:$src2)))]>,
4388 Sched<[WriteShuffle]>;
4389 def rm : PDI<opc, MRMSrcMem,
4390 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
4392 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4393 !strconcat(OpcodeStr,
4394 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4396 (OutVT (OpNode VR128:$src1,
4397 (bc_frag (ld_frag addr:$src2)))))]>,
4398 Sched<[WriteShuffleLd, ReadAfterLd]>;
4401 multiclass sse2_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
4402 ValueType ArgVT, SDNode OpNode, PatFrag bc_frag> {
4403 def Yrr : PDI<opc, MRMSrcReg,
4404 (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
4405 !strconcat(OpcodeStr,
4406 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4408 (OutVT (OpNode (ArgVT VR256:$src1), VR256:$src2)))]>,
4409 Sched<[WriteShuffle]>;
4410 def Yrm : PDI<opc, MRMSrcMem,
4411 (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
4412 !strconcat(OpcodeStr,
4413 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4415 (OutVT (OpNode VR256:$src1,
4416 (bc_frag (loadv4i64 addr:$src2)))))]>,
4417 Sched<[WriteShuffleLd, ReadAfterLd]>;
4420 multiclass sse4_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
4421 ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
4422 PatFrag ld_frag, bit Is2Addr = 1> {
4423 def rr : SS48I<opc, MRMSrcReg,
4424 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
4426 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4427 !strconcat(OpcodeStr,
4428 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4430 (OutVT (OpNode (ArgVT VR128:$src1), VR128:$src2)))]>,
4431 Sched<[WriteShuffle]>;
4432 def rm : SS48I<opc, MRMSrcMem,
4433 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
4435 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4436 !strconcat(OpcodeStr,
4437 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4439 (OutVT (OpNode VR128:$src1,
4440 (bc_frag (ld_frag addr:$src2)))))]>,
4441 Sched<[WriteShuffleLd, ReadAfterLd]>;
4444 multiclass sse4_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
4445 ValueType ArgVT, SDNode OpNode, PatFrag bc_frag> {
4446 def Yrr : SS48I<opc, MRMSrcReg,
4447 (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
4448 !strconcat(OpcodeStr,
4449 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4451 (OutVT (OpNode (ArgVT VR256:$src1), VR256:$src2)))]>,
4452 Sched<[WriteShuffle]>;
4453 def Yrm : SS48I<opc, MRMSrcMem,
4454 (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
4455 !strconcat(OpcodeStr,
4456 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4458 (OutVT (OpNode VR256:$src1,
4459 (bc_frag (loadv4i64 addr:$src2)))))]>,
4460 Sched<[WriteShuffleLd, ReadAfterLd]>;
4463 let Predicates = [HasAVX] in {
4464 defm VPACKSSWB : sse2_pack<0x63, "vpacksswb", v16i8, v8i16, X86Packss,
4465 bc_v8i16, loadv2i64, 0>, VEX_4V;
4466 defm VPACKSSDW : sse2_pack<0x6B, "vpackssdw", v8i16, v4i32, X86Packss,
4467 bc_v4i32, loadv2i64, 0>, VEX_4V;
4469 defm VPACKUSWB : sse2_pack<0x67, "vpackuswb", v16i8, v8i16, X86Packus,
4470 bc_v8i16, loadv2i64, 0>, VEX_4V;
4471 defm VPACKUSDW : sse4_pack<0x2B, "vpackusdw", v8i16, v4i32, X86Packus,
4472 bc_v4i32, loadv2i64, 0>, VEX_4V;
4475 let Predicates = [HasAVX2] in {
4476 defm VPACKSSWB : sse2_pack_y<0x63, "vpacksswb", v32i8, v16i16, X86Packss,
4477 bc_v16i16>, VEX_4V, VEX_L;
4478 defm VPACKSSDW : sse2_pack_y<0x6B, "vpackssdw", v16i16, v8i32, X86Packss,
4479 bc_v8i32>, VEX_4V, VEX_L;
4481 defm VPACKUSWB : sse2_pack_y<0x67, "vpackuswb", v32i8, v16i16, X86Packus,
4482 bc_v16i16>, VEX_4V, VEX_L;
4483 defm VPACKUSDW : sse4_pack_y<0x2B, "vpackusdw", v16i16, v8i32, X86Packus,
4484 bc_v8i32>, VEX_4V, VEX_L;
4487 let Constraints = "$src1 = $dst" in {
4488 defm PACKSSWB : sse2_pack<0x63, "packsswb", v16i8, v8i16, X86Packss,
4489 bc_v8i16, memopv2i64>;
4490 defm PACKSSDW : sse2_pack<0x6B, "packssdw", v8i16, v4i32, X86Packss,
4491 bc_v4i32, memopv2i64>;
4493 defm PACKUSWB : sse2_pack<0x67, "packuswb", v16i8, v8i16, X86Packus,
4494 bc_v8i16, memopv2i64>;
4496 let Predicates = [HasSSE41] in
4497 defm PACKUSDW : sse4_pack<0x2B, "packusdw", v8i16, v4i32, X86Packus,
4498 bc_v4i32, memopv2i64>;
4500 } // ExeDomain = SSEPackedInt
4502 //===---------------------------------------------------------------------===//
4503 // SSE2 - Packed Integer Unpack Instructions
4504 //===---------------------------------------------------------------------===//
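// punpckl* interleaves the low halves of the two sources and punpckh* the
// high halves; e.g. punpcklbw on {a0..a15} and {b0..b15} produces
// {a0,b0,a1,b1,...,a7,b7}. The AVX2 256-bit forms interleave within each
// 128-bit lane independently.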
4506 let ExeDomain = SSEPackedInt in {
4507 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
4508 SDNode OpNode, PatFrag bc_frag, PatFrag ld_frag,
4510 def rr : PDI<opc, MRMSrcReg,
4511 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
4513 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
4514 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4515 [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))],
4516 IIC_SSE_UNPCK>, Sched<[WriteShuffle]>;
4517 def rm : PDI<opc, MRMSrcMem,
4518 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
4520 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
4521 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4522 [(set VR128:$dst, (OpNode VR128:$src1,
4523 (bc_frag (ld_frag addr:$src2))))],
4525 Sched<[WriteShuffleLd, ReadAfterLd]>;
4528 multiclass sse2_unpack_y<bits<8> opc, string OpcodeStr, ValueType vt,
4529 SDNode OpNode, PatFrag bc_frag> {
4530 def Yrr : PDI<opc, MRMSrcReg,
4531 (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
4532 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4533 [(set VR256:$dst, (vt (OpNode VR256:$src1, VR256:$src2)))]>,
4534 Sched<[WriteShuffle]>;
4535 def Yrm : PDI<opc, MRMSrcMem,
4536 (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
4537 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4538 [(set VR256:$dst, (OpNode VR256:$src1,
4539 (bc_frag (loadv4i64 addr:$src2))))]>,
4540 Sched<[WriteShuffleLd, ReadAfterLd]>;
4543 let Predicates = [HasAVX] in {
4544 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Unpckl,
4545 bc_v16i8, loadv2i64, 0>, VEX_4V;
4546 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Unpckl,
4547 bc_v8i16, loadv2i64, 0>, VEX_4V;
4548 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Unpckl,
4549 bc_v4i32, loadv2i64, 0>, VEX_4V;
4550 defm VPUNPCKLQDQ : sse2_unpack<0x6C, "vpunpcklqdq", v2i64, X86Unpckl,
4551 bc_v2i64, loadv2i64, 0>, VEX_4V;
4553 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Unpckh,
4554 bc_v16i8, loadv2i64, 0>, VEX_4V;
4555 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Unpckh,
4556 bc_v8i16, loadv2i64, 0>, VEX_4V;
4557 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Unpckh,
4558 bc_v4i32, loadv2i64, 0>, VEX_4V;
4559 defm VPUNPCKHQDQ : sse2_unpack<0x6D, "vpunpckhqdq", v2i64, X86Unpckh,
4560 bc_v2i64, loadv2i64, 0>, VEX_4V;
4563 let Predicates = [HasAVX2] in {
4564 defm VPUNPCKLBW : sse2_unpack_y<0x60, "vpunpcklbw", v32i8, X86Unpckl,
4565 bc_v32i8>, VEX_4V, VEX_L;
4566 defm VPUNPCKLWD : sse2_unpack_y<0x61, "vpunpcklwd", v16i16, X86Unpckl,
4567 bc_v16i16>, VEX_4V, VEX_L;
4568 defm VPUNPCKLDQ : sse2_unpack_y<0x62, "vpunpckldq", v8i32, X86Unpckl,
4569 bc_v8i32>, VEX_4V, VEX_L;
4570 defm VPUNPCKLQDQ : sse2_unpack_y<0x6C, "vpunpcklqdq", v4i64, X86Unpckl,
4571 bc_v4i64>, VEX_4V, VEX_L;
4573 defm VPUNPCKHBW : sse2_unpack_y<0x68, "vpunpckhbw", v32i8, X86Unpckh,
4574 bc_v32i8>, VEX_4V, VEX_L;
4575 defm VPUNPCKHWD : sse2_unpack_y<0x69, "vpunpckhwd", v16i16, X86Unpckh,
4576 bc_v16i16>, VEX_4V, VEX_L;
4577 defm VPUNPCKHDQ : sse2_unpack_y<0x6A, "vpunpckhdq", v8i32, X86Unpckh,
4578 bc_v8i32>, VEX_4V, VEX_L;
4579 defm VPUNPCKHQDQ : sse2_unpack_y<0x6D, "vpunpckhqdq", v4i64, X86Unpckh,
4580 bc_v4i64>, VEX_4V, VEX_L;
4583 let Constraints = "$src1 = $dst" in {
4584 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Unpckl,
4585 bc_v16i8, memopv2i64>;
4586 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Unpckl,
4587 bc_v8i16, memopv2i64>;
4588 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Unpckl,
4589 bc_v4i32, memopv2i64>;
4590 defm PUNPCKLQDQ : sse2_unpack<0x6C, "punpcklqdq", v2i64, X86Unpckl,
4591 bc_v2i64, memopv2i64>;
4593 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Unpckh,
4594 bc_v16i8, memopv2i64>;
4595 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Unpckh,
4596 bc_v8i16, memopv2i64>;
4597 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Unpckh,
4598 bc_v4i32, memopv2i64>;
4599 defm PUNPCKHQDQ : sse2_unpack<0x6D, "punpckhqdq", v2i64, X86Unpckh,
4600 bc_v2i64, memopv2i64>;
4602 } // ExeDomain = SSEPackedInt
4604 //===---------------------------------------------------------------------===//
4605 // SSE2 - Packed Integer Extract and Insert
4606 //===---------------------------------------------------------------------===//
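// pinsrw replaces the word of an XMM register selected by the immediate with
// the low 16 bits of a GPR or a 16-bit memory operand; pextrw does the
// reverse, zero-extending the selected word into a GPR. For example,
// "pinsrw $3, %eax, %xmm0" writes ax into word element 3 of xmm0.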
4608 let ExeDomain = SSEPackedInt in {
4609 multiclass sse2_pinsrw<bit Is2Addr = 1> {
4610 def rri : Ii8<0xC4, MRMSrcReg,
4611 (outs VR128:$dst), (ins VR128:$src1,
4612 GR32orGR64:$src2, u8imm:$src3),
4614 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
4615 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4617 (X86pinsrw VR128:$src1, GR32orGR64:$src2, imm:$src3))],
4618 IIC_SSE_PINSRW>, Sched<[WriteShuffle]>;
4619 def rmi : Ii8<0xC4, MRMSrcMem,
4620 (outs VR128:$dst), (ins VR128:$src1,
4621 i16mem:$src2, u8imm:$src3),
4623 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
4624 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4626 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
4627 imm:$src3))], IIC_SSE_PINSRW>,
4628 Sched<[WriteShuffleLd, ReadAfterLd]>;
4632 let Predicates = [HasAVX] in
4633 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
4634 (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
4635 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4636 [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
4637 imm:$src2))]>, PD, VEX,
4638 Sched<[WriteShuffle]>;
4639 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
4640 (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
4641 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4642 [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
4643 imm:$src2))], IIC_SSE_PEXTRW>,
4644 Sched<[WriteShuffleLd, ReadAfterLd]>;
4647 let Predicates = [HasAVX] in
4648 defm VPINSRW : sse2_pinsrw<0>, PD, VEX_4V;
4650 let Predicates = [UseSSE2], Constraints = "$src1 = $dst" in
4651 defm PINSRW : sse2_pinsrw, PD;
4653 } // ExeDomain = SSEPackedInt
4655 //===---------------------------------------------------------------------===//
4656 // SSE2 - Packed Mask Creation
4657 //===---------------------------------------------------------------------===//
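// pmovmskb collects the most significant bit of every byte of the source into
// the low bits of a GPR (a 16-bit mask for a 128-bit source, a 32-bit mask
// for the AVX2 256-bit form) and zeroes the remaining destination bits.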
4659 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLogic] in {
4661 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
4663 "pmovmskb\t{$src, $dst|$dst, $src}",
4664 [(set GR32orGR64:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
4665 IIC_SSE_MOVMSK>, VEX;
4667 let Predicates = [HasAVX2] in {
4668 def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
4670 "pmovmskb\t{$src, $dst|$dst, $src}",
4671 [(set GR32orGR64:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>,
4675 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst), (ins VR128:$src),
4676 "pmovmskb\t{$src, $dst|$dst, $src}",
4677 [(set GR32orGR64:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
4680 } // ExeDomain = SSEPackedInt
4682 //===---------------------------------------------------------------------===//
4683 // SSE2 - Conditional Store
4684 //===---------------------------------------------------------------------===//
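// maskmovdqu stores only the bytes of $src whose corresponding byte in $mask
// has its most significant bit set; the other bytes in memory are left
// untouched. The store address is implicit in EDI (RDI in 64-bit mode), which
// is why the definitions below list those registers in Uses.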
4686 let ExeDomain = SSEPackedInt, SchedRW = [WriteStore] in {
4688 let Uses = [EDI], Predicates = [HasAVX,Not64BitMode] in
4689 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
4690 (ins VR128:$src, VR128:$mask),
4691 "maskmovdqu\t{$mask, $src|$src, $mask}",
4692 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
4693 IIC_SSE_MASKMOV>, VEX;
4694 let Uses = [RDI], Predicates = [HasAVX,In64BitMode] in
4695 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
4696 (ins VR128:$src, VR128:$mask),
4697 "maskmovdqu\t{$mask, $src|$src, $mask}",
4698 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
4699 IIC_SSE_MASKMOV>, VEX;
4701 let Uses = [EDI], Predicates = [UseSSE2,Not64BitMode] in
4702 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
4703 "maskmovdqu\t{$mask, $src|$src, $mask}",
4704 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
4706 let Uses = [RDI], Predicates = [UseSSE2,In64BitMode] in
4707 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
4708 "maskmovdqu\t{$mask, $src|$src, $mask}",
4709 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
4712 } // ExeDomain = SSEPackedInt
4714 //===---------------------------------------------------------------------===//
4715 // SSE2 - Move Doubleword
4716 //===---------------------------------------------------------------------===//
4718 //===---------------------------------------------------------------------===//
4719 // Move Int Doubleword to Packed Double Int
4721 def VMOVDI2PDIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4722 "movd\t{$src, $dst|$dst, $src}",
4724 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
4725 VEX, Sched<[WriteMove]>;
4726 def VMOVDI2PDIrm : VS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4727 "movd\t{$src, $dst|$dst, $src}",
4729 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
4731 VEX, Sched<[WriteLoad]>;
4732 def VMOV64toPQIrr : VRS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4733 "movq\t{$src, $dst|$dst, $src}",
4735 (v2i64 (scalar_to_vector GR64:$src)))],
4736 IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
4737 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
4738 def VMOV64toPQIrm : VRS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4739 "movq\t{$src, $dst|$dst, $src}",
4740 [], IIC_SSE_MOVDQ>, VEX, Sched<[WriteLoad]>;
4741 let isCodeGenOnly = 1 in
4742 def VMOV64toSDrr : VRS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
4743 "movq\t{$src, $dst|$dst, $src}",
4744 [(set FR64:$dst, (bitconvert GR64:$src))],
4745 IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
4747 def MOVDI2PDIrr : S2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4748 "movd\t{$src, $dst|$dst, $src}",
4750 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
4752 def MOVDI2PDIrm : S2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4753 "movd\t{$src, $dst|$dst, $src}",
4755 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
4756 IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
4757 def MOV64toPQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4758 "mov{d|q}\t{$src, $dst|$dst, $src}",
4760 (v2i64 (scalar_to_vector GR64:$src)))],
4761 IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
4762 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
4763 def MOV64toPQIrm : RS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4764 "mov{d|q}\t{$src, $dst|$dst, $src}",
4765 [], IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
4766 let isCodeGenOnly = 1 in
4767 def MOV64toSDrr : RS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
4768 "mov{d|q}\t{$src, $dst|$dst, $src}",
4769 [(set FR64:$dst, (bitconvert GR64:$src))],
4770 IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
4772 //===---------------------------------------------------------------------===//
4773 // Move Int Doubleword to Single Scalar
4775 let isCodeGenOnly = 1 in {
4776 def VMOVDI2SSrr : VS2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
4777 "movd\t{$src, $dst|$dst, $src}",
4778 [(set FR32:$dst, (bitconvert GR32:$src))],
4779 IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
4781 def VMOVDI2SSrm : VS2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
4782 "movd\t{$src, $dst|$dst, $src}",
4783 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
4785 VEX, Sched<[WriteLoad]>;
4786 def MOVDI2SSrr : S2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
4787 "movd\t{$src, $dst|$dst, $src}",
4788 [(set FR32:$dst, (bitconvert GR32:$src))],
4789 IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
4791 def MOVDI2SSrm : S2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
4792 "movd\t{$src, $dst|$dst, $src}",
4793 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
4794 IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
4797 //===---------------------------------------------------------------------===//
4798 // Move Packed Doubleword Int to Packed Double Int
4800 def VMOVPDI2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
4801 "movd\t{$src, $dst|$dst, $src}",
4802 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
4803 (iPTR 0)))], IIC_SSE_MOVD_ToGP>, VEX,
4805 def VMOVPDI2DImr : VS2I<0x7E, MRMDestMem, (outs),
4806 (ins i32mem:$dst, VR128:$src),
4807 "movd\t{$src, $dst|$dst, $src}",
4808 [(store (i32 (vector_extract (v4i32 VR128:$src),
4809 (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
4810 VEX, Sched<[WriteStore]>;
4811 def MOVPDI2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
4812 "movd\t{$src, $dst|$dst, $src}",
4813 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
4814 (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
4816 def MOVPDI2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
4817 "movd\t{$src, $dst|$dst, $src}",
4818 [(store (i32 (vector_extract (v4i32 VR128:$src),
4819 (iPTR 0))), addr:$dst)],
4820 IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
4822 def : Pat<(v8i32 (X86Vinsert (v8i32 immAllZerosV), GR32:$src2, (iPTR 0))),
4823 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src2), sub_xmm)>;
4825 def : Pat<(v4i64 (X86Vinsert (bc_v4i64 (v8i32 immAllZerosV)), GR64:$src2, (iPTR 0))),
4826 (SUBREG_TO_REG (i32 0), (VMOV64toPQIrr GR64:$src2), sub_xmm)>;
4828 def : Pat<(v8i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
4829 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src2), sub_xmm)>;
4831 def : Pat<(v4i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
4832 (SUBREG_TO_REG (i32 0), (VMOV64toPQIrr GR64:$src2), sub_xmm)>;
4834 //===---------------------------------------------------------------------===//
4835 // Move the first element of a Packed Doubleword Int to a Doubleword Int
4837 let SchedRW = [WriteMove] in {
4838 def VMOVPQIto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4839 "movq\t{$src, $dst|$dst, $src}",
4840 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
4845 def MOVPQIto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4846 "mov{d|q}\t{$src, $dst|$dst, $src}",
4847 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
4852 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
4853 def VMOVPQIto64rm : VRS2I<0x7E, MRMDestMem, (outs i64mem:$dst),
4854 (ins VR128:$src), "movq\t{$src, $dst|$dst, $src}",
4855 [], IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
4856 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
4857 def MOVPQIto64rm : RS2I<0x7E, MRMDestMem, (outs i64mem:$dst), (ins VR128:$src),
4858 "mov{d|q}\t{$src, $dst|$dst, $src}",
4859 [], IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
4861 //===---------------------------------------------------------------------===//
4862 // Bitcast FR64 <-> GR64
4864 let isCodeGenOnly = 1 in {
4865 let Predicates = [UseAVX] in
4866 def VMOV64toSDrm : VS2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
4867 "movq\t{$src, $dst|$dst, $src}",
4868 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
4869 VEX, Sched<[WriteLoad]>;
4870 def VMOVSDto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
4871 "movq\t{$src, $dst|$dst, $src}",
4872 [(set GR64:$dst, (bitconvert FR64:$src))],
4873 IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
4874 def VMOVSDto64mr : VRS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
4875 "movq\t{$src, $dst|$dst, $src}",
4876 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
4877 IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
4879 def MOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
4880 "movq\t{$src, $dst|$dst, $src}",
4881 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))],
4882 IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
4883 def MOVSDto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
4884 "mov{d|q}\t{$src, $dst|$dst, $src}",
4885 [(set GR64:$dst, (bitconvert FR64:$src))],
4886 IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
4887 def MOVSDto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
4888 "movq\t{$src, $dst|$dst, $src}",
4889 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
4890 IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
4893 //===---------------------------------------------------------------------===//
4894 // Move Scalar Single to Double Int
4896 let isCodeGenOnly = 1 in {
4897 def VMOVSS2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
4898 "movd\t{$src, $dst|$dst, $src}",
4899 [(set GR32:$dst, (bitconvert FR32:$src))],
4900 IIC_SSE_MOVD_ToGP>, VEX, Sched<[WriteMove]>;
4901 def VMOVSS2DImr : VS2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
4902 "movd\t{$src, $dst|$dst, $src}",
4903 [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
4904 IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
4905 def MOVSS2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
4906 "movd\t{$src, $dst|$dst, $src}",
4907 [(set GR32:$dst, (bitconvert FR32:$src))],
4908 IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
4909 def MOVSS2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
4910 "movd\t{$src, $dst|$dst, $src}",
4911 [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
4912 IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
4915 //===---------------------------------------------------------------------===//
4916 // Patterns and instructions for movd/movq moves to an XMM register that zero-extend the result
4918 let isCodeGenOnly = 1, SchedRW = [WriteMove] in {
4919 let AddedComplexity = 15 in {
4920 def VMOVZQI2PQIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4921 "movq\t{$src, $dst|$dst, $src}", // X86-64 only
4922 [(set VR128:$dst, (v2i64 (X86vzmovl
4923 (v2i64 (scalar_to_vector GR64:$src)))))],
4926 def MOVZQI2PQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4927 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
4928 [(set VR128:$dst, (v2i64 (X86vzmovl
4929 (v2i64 (scalar_to_vector GR64:$src)))))],
4932 } // isCodeGenOnly, SchedRW
4934 let Predicates = [UseAVX] in {
4935 let AddedComplexity = 15 in
4936 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
4937 (VMOVDI2PDIrr GR32:$src)>;
4939 // AVX 128-bit movd/movq instructions write zeros in the high 128-bit part.
4940 let AddedComplexity = 20 in {
4941 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
4942 (VMOVDI2PDIrm addr:$src)>;
4943 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
4944 (VMOVDI2PDIrm addr:$src)>;
4945 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
4946 (VMOVDI2PDIrm addr:$src)>;
4948 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
4949 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
4950 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
4951 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src), sub_xmm)>;
4952 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
4953 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
4954 (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
4957 let Predicates = [UseSSE2] in {
4958 let AddedComplexity = 15 in
4959 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
4960 (MOVDI2PDIrr GR32:$src)>;
4962 let AddedComplexity = 20 in {
4963 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
4964 (MOVDI2PDIrm addr:$src)>;
4965 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
4966 (MOVDI2PDIrm addr:$src)>;
4967 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
4968 (MOVDI2PDIrm addr:$src)>;
4972 // These are the correct encodings of the instructions so that we know how to
4973 // read correct assembly, even though we continue to emit the wrong ones for
4974 // compatibility with Darwin's buggy assembler.
4975 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4976 (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
4977 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4978 (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
4979 // Allow "vmovd" but print "vmovq" since we don't need compatibility for AVX.
4980 def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
4981 (VMOV64toPQIrr VR128:$dst, GR64:$src), 0>;
4982 def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
4983 (VMOVPQIto64rr GR64:$dst, VR128:$src), 0>;
4985 //===---------------------------------------------------------------------===//
4986 // SSE2 - Move Quadword
4987 //===---------------------------------------------------------------------===//
4989 //===---------------------------------------------------------------------===//
4990 // Move Quadword Int to Packed Quadword Int
4993 let ExeDomain = SSEPackedInt, SchedRW = [WriteLoad] in {
4994 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4995 "vmovq\t{$src, $dst|$dst, $src}",
4997 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
4998 VEX, Requires<[UseAVX]>;
4999 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
5000 "movq\t{$src, $dst|$dst, $src}",
5002 (v2i64 (scalar_to_vector (loadi64 addr:$src))))],
5004 Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix
5005 } // ExeDomain, SchedRW
5007 //===---------------------------------------------------------------------===//
5008 // Move Packed Quadword Int to Quadword Int
5010 let ExeDomain = SSEPackedInt, SchedRW = [WriteStore] in {
5011 def VMOVPQI2QImr : VS2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
5012 "movq\t{$src, $dst|$dst, $src}",
5013 [(store (i64 (vector_extract (v2i64 VR128:$src),
5014 (iPTR 0))), addr:$dst)],
5015 IIC_SSE_MOVDQ>, VEX;
5016 def MOVPQI2QImr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
5017 "movq\t{$src, $dst|$dst, $src}",
5018 [(store (i64 (vector_extract (v2i64 VR128:$src),
5019 (iPTR 0))), addr:$dst)],
5021 } // ExeDomain, SchedRW
5023 // For disassembler only
5024 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
5025 SchedRW = [WriteVecLogic] in {
5026 def VMOVPQI2QIrr : VS2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
5027 "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>, VEX;
5028 def MOVPQI2QIrr : S2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
5029 "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>;
5032 //===---------------------------------------------------------------------===//
5033 // Store / copy the lower 64 bits of an XMM register.
5035 let Predicates = [UseAVX] in
5036 def : Pat<(int_x86_sse2_storel_dq addr:$dst, VR128:$src),
5037 (VMOVPQI2QImr addr:$dst, VR128:$src)>;
5038 let Predicates = [UseSSE2] in
5039 def : Pat<(int_x86_sse2_storel_dq addr:$dst, VR128:$src),
5040 (MOVPQI2QImr addr:$dst, VR128:$src)>;
5042 let ExeDomain = SSEPackedInt, isCodeGenOnly = 1, AddedComplexity = 20 in {
5043 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
5044 "vmovq\t{$src, $dst|$dst, $src}",
5046 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
5047 (loadi64 addr:$src))))))],
5049 XS, VEX, Requires<[UseAVX]>, Sched<[WriteLoad]>;
5051 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
5052 "movq\t{$src, $dst|$dst, $src}",
5054 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
5055 (loadi64 addr:$src))))))],
5057 XS, Requires<[UseSSE2]>, Sched<[WriteLoad]>;
5058 } // ExeDomain, isCodeGenOnly, AddedComplexity
5060 let Predicates = [UseAVX], AddedComplexity = 20 in {
5061 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
5062 (VMOVZQI2PQIrm addr:$src)>;
5063 def : Pat<(v2i64 (X86vzload addr:$src)),
5064 (VMOVZQI2PQIrm addr:$src)>;
5067 let Predicates = [UseSSE2], AddedComplexity = 20 in {
5068 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
5069 (MOVZQI2PQIrm addr:$src)>;
5070 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
5073 let Predicates = [HasAVX] in {
5074 def : Pat<(v4i64 (alignedX86vzload addr:$src)),
5075 (SUBREG_TO_REG (i32 0), (VMOVAPSrm addr:$src), sub_xmm)>;
5076 def : Pat<(v4i64 (X86vzload addr:$src)),
5077 (SUBREG_TO_REG (i32 0), (VMOVUPSrm addr:$src), sub_xmm)>;
5080 //===---------------------------------------------------------------------===//
5081 // Moving from XMM to XMM while clearing the upper 64 bits. Note: there is a
5082 // bug in the IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
5084 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLogic] in {
5085 let AddedComplexity = 15 in
5086 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5087 "vmovq\t{$src, $dst|$dst, $src}",
5088 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
5090 XS, VEX, Requires<[UseAVX]>;
5091 let AddedComplexity = 15 in
5092 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5093 "movq\t{$src, $dst|$dst, $src}",
5094 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
5096 XS, Requires<[UseSSE2]>;
5097 } // ExeDomain, SchedRW
5099 let ExeDomain = SSEPackedInt, isCodeGenOnly = 1, SchedRW = [WriteVecLogicLd] in {
5100 let AddedComplexity = 20 in
5101 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5102 "vmovq\t{$src, $dst|$dst, $src}",
5103 [(set VR128:$dst, (v2i64 (X86vzmovl
5104 (loadv2i64 addr:$src))))],
5106 XS, VEX, Requires<[UseAVX]>;
5107 let AddedComplexity = 20 in {
5108 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5109 "movq\t{$src, $dst|$dst, $src}",
5110 [(set VR128:$dst, (v2i64 (X86vzmovl
5111 (loadv2i64 addr:$src))))],
5113 XS, Requires<[UseSSE2]>;
5115 } // ExeDomain, isCodeGenOnly, SchedRW
5117 let AddedComplexity = 20 in {
5118 let Predicates = [UseAVX] in {
5119 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
5120 (VMOVZPQILo2PQIrr VR128:$src)>;
5122 let Predicates = [UseSSE2] in {
5123 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
5124 (MOVZPQILo2PQIrr VR128:$src)>;
5128 //===---------------------------------------------------------------------===//
5129 // SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
5130 //===---------------------------------------------------------------------===//
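// movshdup duplicates the odd-indexed single-precision elements into the even
// positions, giving {s1,s1,s3,s3}; movsldup duplicates the even-indexed
// elements, giving {s0,s0,s2,s2}. The AVX 256-bit forms apply the same
// pattern across all eight elements.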
5131 multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
5132 ValueType vt, RegisterClass RC, PatFrag mem_frag,
5133 X86MemOperand x86memop> {
5134 def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
5135 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5136 [(set RC:$dst, (vt (OpNode RC:$src)))],
5137 IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
5138 def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
5139 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5140 [(set RC:$dst, (OpNode (mem_frag addr:$src)))],
5141 IIC_SSE_MOV_LH>, Sched<[WriteLoad]>;
5144 let Predicates = [HasAVX] in {
5145 defm VMOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
5146 v4f32, VR128, loadv4f32, f128mem>, VEX;
5147 defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
5148 v4f32, VR128, loadv4f32, f128mem>, VEX;
5149 defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
5150 v8f32, VR256, loadv8f32, f256mem>, VEX, VEX_L;
5151 defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
5152 v8f32, VR256, loadv8f32, f256mem>, VEX, VEX_L;
5154 defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
5155 memopv4f32, f128mem>;
5156 defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
5157 memopv4f32, f128mem>;
5159 let Predicates = [HasAVX] in {
5160 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
5161 (VMOVSHDUPrr VR128:$src)>;
5162 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (loadv2i64 addr:$src)))),
5163 (VMOVSHDUPrm addr:$src)>;
5164 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
5165 (VMOVSLDUPrr VR128:$src)>;
5166 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (loadv2i64 addr:$src)))),
5167 (VMOVSLDUPrm addr:$src)>;
5168 def : Pat<(v8i32 (X86Movshdup VR256:$src)),
5169 (VMOVSHDUPYrr VR256:$src)>;
5170 def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (loadv4i64 addr:$src)))),
5171 (VMOVSHDUPYrm addr:$src)>;
5172 def : Pat<(v8i32 (X86Movsldup VR256:$src)),
5173 (VMOVSLDUPYrr VR256:$src)>;
5174 def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (loadv4i64 addr:$src)))),
5175 (VMOVSLDUPYrm addr:$src)>;
5178 let Predicates = [UseSSE3] in {
5179 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
5180 (MOVSHDUPrr VR128:$src)>;
5181 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
5182 (MOVSHDUPrm addr:$src)>;
5183 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
5184 (MOVSLDUPrr VR128:$src)>;
5185 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
5186 (MOVSLDUPrm addr:$src)>;
5189 //===---------------------------------------------------------------------===//
5190 // SSE3 - Replicate Double FP - MOVDDUP
5191 //===---------------------------------------------------------------------===//
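// movddup broadcasts the low double-precision element into both halves of the
// destination, giving {d0,d0}; the 128-bit memory form reads only 64 bits,
// which is why the rm pattern below matches a scalar_to_vector of loadf64.
// The 256-bit form duplicates the low element of each 128-bit lane.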
5193 multiclass sse3_replicate_dfp<string OpcodeStr> {
5194 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5195 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5196 [(set VR128:$dst, (v2f64 (X86Movddup VR128:$src)))],
5197 IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
5198 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
5199 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5202 (scalar_to_vector (loadf64 addr:$src)))))],
5203 IIC_SSE_MOV_LH>, Sched<[WriteLoad]>;
5206 // FIXME: Merge with the above class when there are patterns for the ymm version.
5207 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
5208 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
5209 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5210 [(set VR256:$dst, (v4f64 (X86Movddup VR256:$src)))]>,
5211 Sched<[WriteFShuffle]>;
5212 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
5213 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5216 (scalar_to_vector (loadf64 addr:$src)))))]>,
5220 let Predicates = [HasAVX] in {
5221 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
5222 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX, VEX_L;
5225 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
5227 let Predicates = [HasAVX] in {
5228 def : Pat<(X86Movddup (loadv2f64 addr:$src)),
5229 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5230 def : Pat<(X86Movddup (bc_v2f64 (loadv4f32 addr:$src))),
5231 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5232 def : Pat<(X86Movddup (bc_v2f64 (loadv2i64 addr:$src))),
5233 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5234 def : Pat<(X86Movddup (bc_v2f64
5235 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5236 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5239 def : Pat<(X86Movddup (loadv4f64 addr:$src)),
5240 (VMOVDDUPYrm addr:$src)>;
5241 def : Pat<(X86Movddup (loadv4i64 addr:$src)),
5242 (VMOVDDUPYrm addr:$src)>;
5243 def : Pat<(X86Movddup (v4i64 (scalar_to_vector (loadi64 addr:$src)))),
5244 (VMOVDDUPYrm addr:$src)>;
5245 def : Pat<(X86Movddup (v4i64 VR256:$src)),
5246 (VMOVDDUPYrr VR256:$src)>;
5249 let Predicates = [UseAVX, OptForSize] in {
5250 def : Pat<(v2f64 (X86VBroadcast (loadf64 addr:$src))),
5251 (VMOVDDUPrm addr:$src)>;
5252 def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
5253 (VMOVDDUPrm addr:$src)>;
5256 let Predicates = [UseSSE3] in {
5257 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5258 (MOVDDUPrm addr:$src)>;
5259 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
5260 (MOVDDUPrm addr:$src)>;
5261 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
5262 (MOVDDUPrm addr:$src)>;
5263 def : Pat<(X86Movddup (bc_v2f64
5264 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5265 (MOVDDUPrm addr:$src)>;
5268 //===---------------------------------------------------------------------===//
5269 // SSE3 - Move Unaligned Integer
5270 //===---------------------------------------------------------------------===//
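// lddqu/vlddqu perform an unaligned integer load; on some implementations the
// hardware may read a wider aligned region around the address to avoid
// cache-line-split penalties, which is presumably why these are modeled only
// through their intrinsics rather than as ordinary load patterns.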
5272 let SchedRW = [WriteLoad] in {
5273 let Predicates = [HasAVX] in {
5274 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5275 "vlddqu\t{$src, $dst|$dst, $src}",
5276 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
5277 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
5278 "vlddqu\t{$src, $dst|$dst, $src}",
5279 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>,
5282 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5283 "lddqu\t{$src, $dst|$dst, $src}",
5284 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))],
5288 //===---------------------------------------------------------------------===//
5289 // SSE3 - Arithmetic
5290 //===---------------------------------------------------------------------===//
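// addsubps/addsubpd subtract in the even element positions and add in the odd
// ones: for addsubps the result is {a0-b0, a1+b1, a2-b2, a3+b3}. The
// X86Addsub patterns further below select these instructions for that
// interleaved add/subtract shape.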
5292 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
5293 X86MemOperand x86memop, OpndItins itins,
5294 PatFrag ld_frag, bit Is2Addr = 1> {
5295 def rr : I<0xD0, MRMSrcReg,
5296 (outs RC:$dst), (ins RC:$src1, RC:$src2),
5298 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5299 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5300 [(set RC:$dst, (Int RC:$src1, RC:$src2))], itins.rr>,
5301 Sched<[itins.Sched]>;
5302 def rm : I<0xD0, MRMSrcMem,
5303 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5305 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5306 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5307 [(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2)))], itins.rr>,
5308 Sched<[itins.Sched.Folded, ReadAfterLd]>;
5311 let Predicates = [HasAVX] in {
5312 let ExeDomain = SSEPackedSingle in {
5313 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
5314 f128mem, SSE_ALU_F32P, loadv4f32, 0>, XD, VEX_4V;
5315 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
5316 f256mem, SSE_ALU_F32P, loadv8f32, 0>, XD, VEX_4V, VEX_L;
5318 let ExeDomain = SSEPackedDouble in {
5319 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
5320 f128mem, SSE_ALU_F64P, loadv2f64, 0>, PD, VEX_4V;
5321 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
5322 f256mem, SSE_ALU_F64P, loadv4f64, 0>, PD, VEX_4V, VEX_L;
5325 let Constraints = "$src1 = $dst", Predicates = [UseSSE3] in {
5326 let ExeDomain = SSEPackedSingle in
5327 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
5328 f128mem, SSE_ALU_F32P, memopv4f32>, XD;
5329 let ExeDomain = SSEPackedDouble in
5330 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
5331 f128mem, SSE_ALU_F64P, memopv2f64>, PD;
5334 // Patterns used to select 'addsub' instructions.
5335 let Predicates = [HasAVX] in {
5336 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 VR128:$rhs))),
5337 (VADDSUBPSrr VR128:$lhs, VR128:$rhs)>;
5338 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (loadv4f32 addr:$rhs))),
5339 (VADDSUBPSrm VR128:$lhs, f128mem:$rhs)>;
5340 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 VR128:$rhs))),
5341 (VADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
5342 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (loadv2f64 addr:$rhs))),
5343 (VADDSUBPDrm VR128:$lhs, f128mem:$rhs)>;
5345 def : Pat<(v8f32 (X86Addsub (v8f32 VR256:$lhs), (v8f32 VR256:$rhs))),
5346 (VADDSUBPSYrr VR256:$lhs, VR256:$rhs)>;
5347 def : Pat<(v8f32 (X86Addsub (v8f32 VR256:$lhs), (loadv8f32 addr:$rhs))),
5348 (VADDSUBPSYrm VR256:$lhs, f256mem:$rhs)>;
5349 def : Pat<(v4f64 (X86Addsub (v4f64 VR256:$lhs), (v4f64 VR256:$rhs))),
5350 (VADDSUBPDYrr VR256:$lhs, VR256:$rhs)>;
5351 def : Pat<(v4f64 (X86Addsub (v4f64 VR256:$lhs), (loadv4f64 addr:$rhs))),
5352 (VADDSUBPDYrm VR256:$lhs, f256mem:$rhs)>;
5355 let Predicates = [UseSSE3] in {
5356 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 VR128:$rhs))),
5357 (ADDSUBPSrr VR128:$lhs, VR128:$rhs)>;
5358 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (memopv4f32 addr:$rhs))),
5359 (ADDSUBPSrm VR128:$lhs, f128mem:$rhs)>;
5360 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 VR128:$rhs))),
5361 (ADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
5362 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (memopv2f64 addr:$rhs))),
5363 (ADDSUBPDrm VR128:$lhs, f128mem:$rhs)>;
5366 //===---------------------------------------------------------------------===//
5367 // SSE3 Instructions
5368 //===---------------------------------------------------------------------===//
5371 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
5372 X86MemOperand x86memop, SDNode OpNode, PatFrag ld_frag,
5374 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
5376 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5377 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5378 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>,
5381 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5383 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5384 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5385 [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))],
5386 IIC_SSE_HADDSUB_RM>, Sched<[WriteFAddLd, ReadAfterLd]>;
5388 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
5389 X86MemOperand x86memop, SDNode OpNode, PatFrag ld_frag,
5391 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
5393 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5394 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5395 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>,
5398 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5400 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5401 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5402 [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))],
5403 IIC_SSE_HADDSUB_RM>, Sched<[WriteFAddLd, ReadAfterLd]>;
5406 let Predicates = [HasAVX] in {
5407 let ExeDomain = SSEPackedSingle in {
5408 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
5409 X86fhadd, loadv4f32, 0>, VEX_4V;
5410 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
5411 X86fhsub, loadv4f32, 0>, VEX_4V;
5412 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
5413 X86fhadd, loadv8f32, 0>, VEX_4V, VEX_L;
5414 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
5415 X86fhsub, loadv8f32, 0>, VEX_4V, VEX_L;
5417 let ExeDomain = SSEPackedDouble in {
5418 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
5419 X86fhadd, loadv2f64, 0>, VEX_4V;
5420 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
5421 X86fhsub, loadv2f64, 0>, VEX_4V;
5422 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
5423 X86fhadd, loadv4f64, 0>, VEX_4V, VEX_L;
5424 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
5425 X86fhsub, loadv4f64, 0>, VEX_4V, VEX_L;
5429 let Constraints = "$src1 = $dst" in {
5430 let ExeDomain = SSEPackedSingle in {
5431 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem, X86fhadd,
5433 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem, X86fhsub,
5436 let ExeDomain = SSEPackedDouble in {
5437 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem, X86fhadd,
5439 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem, X86fhsub,
5444 //===---------------------------------------------------------------------===//
5445 // SSSE3 - Packed Absolute Instructions
5446 //===---------------------------------------------------------------------===//
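// pabsb/pabsw/pabsd compute the per-element absolute value, producing an
// unsigned result (the most negative input wraps, e.g. pabsb of -128 gives
// 0x80). The Pat fragments below also fold the open-coded sign-mask idiom,
// (x + (x >>s N)) xor (x >>s N), into these instructions.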
5449 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
5450 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
5452 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
5454 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5455 [(set VR128:$dst, (IntId128 VR128:$src))], IIC_SSE_PABS_RR>,
5456 Sched<[WriteVecALU]>;
5458 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
5460 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5463 (bitconvert (ld_frag addr:$src))))], IIC_SSE_PABS_RM>,
5464 Sched<[WriteVecALULd]>;
5467 /// SS3I_unop_rm_int_y - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
5468 multiclass SS3I_unop_rm_int_y<bits<8> opc, string OpcodeStr,
5469 Intrinsic IntId256> {
5470 def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
5472 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5473 [(set VR256:$dst, (IntId256 VR256:$src))]>,
5474 Sched<[WriteVecALU]>;
5476 def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
5478 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5481 (bitconvert (loadv4i64 addr:$src))))]>,
5482 Sched<[WriteVecALULd]>;
5485 // Helper fragments to match sext vXi1 to vXiY.
5486 def v16i1sextv16i8 : PatLeaf<(v16i8 (X86pcmpgt (bc_v16i8 (v4i32 immAllZerosV)),
5488 def v8i1sextv8i16 : PatLeaf<(v8i16 (X86vsrai VR128:$src, (i8 15)))>;
5489 def v4i1sextv4i32 : PatLeaf<(v4i32 (X86vsrai VR128:$src, (i8 31)))>;
5490 def v32i1sextv32i8 : PatLeaf<(v32i8 (X86pcmpgt (bc_v32i8 (v8i32 immAllZerosV)),
5492 def v16i1sextv16i16: PatLeaf<(v16i16 (X86vsrai VR256:$src, (i8 15)))>;
5493 def v8i1sextv8i32 : PatLeaf<(v8i32 (X86vsrai VR256:$src, (i8 31)))>;
5495 let Predicates = [HasAVX] in {
5496 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", int_x86_ssse3_pabs_b_128,
5498 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", int_x86_ssse3_pabs_w_128,
5500 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", int_x86_ssse3_pabs_d_128,
5504 (bc_v2i64 (v16i1sextv16i8)),
5505 (bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))),
5506 (VPABSBrr128 VR128:$src)>;
5508 (bc_v2i64 (v8i1sextv8i16)),
5509 (bc_v2i64 (add (v8i16 VR128:$src), (v8i1sextv8i16)))),
5510 (VPABSWrr128 VR128:$src)>;
5512 (bc_v2i64 (v4i1sextv4i32)),
5513 (bc_v2i64 (add (v4i32 VR128:$src), (v4i1sextv4i32)))),
5514 (VPABSDrr128 VR128:$src)>;
5517 let Predicates = [HasAVX2] in {
5518 defm VPABSB : SS3I_unop_rm_int_y<0x1C, "vpabsb",
5519 int_x86_avx2_pabs_b>, VEX, VEX_L;
5520 defm VPABSW : SS3I_unop_rm_int_y<0x1D, "vpabsw",
5521 int_x86_avx2_pabs_w>, VEX, VEX_L;
5522 defm VPABSD : SS3I_unop_rm_int_y<0x1E, "vpabsd",
5523 int_x86_avx2_pabs_d>, VEX, VEX_L;
5526 (bc_v4i64 (v32i1sextv32i8)),
5527 (bc_v4i64 (add (v32i8 VR256:$src), (v32i1sextv32i8)))),
5528 (VPABSBrr256 VR256:$src)>;
5530 (bc_v4i64 (v16i1sextv16i16)),
5531 (bc_v4i64 (add (v16i16 VR256:$src), (v16i1sextv16i16)))),
5532 (VPABSWrr256 VR256:$src)>;
5534 (bc_v4i64 (v8i1sextv8i32)),
5535 (bc_v4i64 (add (v8i32 VR256:$src), (v8i1sextv8i32)))),
5536 (VPABSDrr256 VR256:$src)>;
5539 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", int_x86_ssse3_pabs_b_128,
5541 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", int_x86_ssse3_pabs_w_128,
5543 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", int_x86_ssse3_pabs_d_128,
5546 let Predicates = [HasSSSE3] in {
5548 (bc_v2i64 (v16i1sextv16i8)),
5549 (bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))),
5550 (PABSBrr128 VR128:$src)>;
5552 (bc_v2i64 (v8i1sextv8i16)),
5553 (bc_v2i64 (add (v8i16 VR128:$src), (v8i1sextv8i16)))),
5554 (PABSWrr128 VR128:$src)>;
5556 (bc_v2i64 (v4i1sextv4i32)),
5557 (bc_v2i64 (add (v4i32 VR128:$src), (v4i1sextv4i32)))),
5558 (PABSDrr128 VR128:$src)>;
5561 //===---------------------------------------------------------------------===//
5562 // SSSE3 - Packed Binary Operator Instructions
5563 //===---------------------------------------------------------------------===//
5565 let Sched = WriteVecALU in {
5566 def SSE_PHADDSUBD : OpndItins<
5567 IIC_SSE_PHADDSUBD_RR, IIC_SSE_PHADDSUBD_RM
5569 def SSE_PHADDSUBSW : OpndItins<
5570 IIC_SSE_PHADDSUBSW_RR, IIC_SSE_PHADDSUBSW_RM
5572 def SSE_PHADDSUBW : OpndItins<
5573 IIC_SSE_PHADDSUBW_RR, IIC_SSE_PHADDSUBW_RM
5576 let Sched = WriteShuffle in
5577 def SSE_PSHUFB : OpndItins<
5578 IIC_SSE_PSHUFB_RR, IIC_SSE_PSHUFB_RM
5580 let Sched = WriteVecALU in
5581 def SSE_PSIGN : OpndItins<
5582 IIC_SSE_PSIGN_RR, IIC_SSE_PSIGN_RM
5584 let Sched = WriteVecIMul in
5585 def SSE_PMULHRSW : OpndItins<
5586 IIC_SSE_PMULHRSW, IIC_SSE_PMULHRSW
5589 /// SS3I_binop_rm - Simple SSSE3 bin op
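/// The rr form works register-to-register and the rm form folds a vector load
/// of the second operand.  When Is2Addr is set, the destructive two-operand
/// SSE asm syntax is used; otherwise the three-operand AVX (VEX) syntax.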
5590 multiclass SS3I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
5591 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
5592 X86MemOperand x86memop, OpndItins itins,
5594 let isCommutable = 1 in
5595 def rr : SS38I<opc, MRMSrcReg, (outs RC:$dst),
5596 (ins RC:$src1, RC:$src2),
5598 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5599 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5600 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
5601 Sched<[itins.Sched]>;
5602 def rm : SS38I<opc, MRMSrcMem, (outs RC:$dst),
5603 (ins RC:$src1, x86memop:$src2),
5605 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5606 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5608 (OpVT (OpNode RC:$src1,
5609 (bitconvert (memop_frag addr:$src2)))))], itins.rm>,
5610 Sched<[itins.Sched.Folded, ReadAfterLd]>;
5613 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
5614 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
5615 Intrinsic IntId128, OpndItins itins,
5616 PatFrag ld_frag, bit Is2Addr = 1> {
5617 let isCommutable = 1 in
5618 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
5619 (ins VR128:$src1, VR128:$src2),
5621 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5622 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5623 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5624 Sched<[itins.Sched]>;
5625 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
5626 (ins VR128:$src1, i128mem:$src2),
5628 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5629 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5631 (IntId128 VR128:$src1,
5632 (bitconvert (ld_frag addr:$src2))))]>,
5633 Sched<[itins.Sched.Folded, ReadAfterLd]>;
5636 multiclass SS3I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
5638 X86FoldableSchedWrite Sched> {
5639 let isCommutable = 1 in
5640 def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
5641 (ins VR256:$src1, VR256:$src2),
5642 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5643 [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>,
5645 def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
5646 (ins VR256:$src1, i256mem:$src2),
5647 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5649 (IntId256 VR256:$src1, (bitconvert (loadv4i64 addr:$src2))))]>,
5650 Sched<[Sched.Folded, ReadAfterLd]>;
5653 let ImmT = NoImm, Predicates = [HasAVX] in {
5654 let isCommutable = 0 in {
5655 defm VPHADDW : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v8i16, VR128,
5657 SSE_PHADDSUBW, 0>, VEX_4V;
5658 defm VPHADDD : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v4i32, VR128,
5660 SSE_PHADDSUBD, 0>, VEX_4V;
5661 defm VPHSUBW : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v8i16, VR128,
5663 SSE_PHADDSUBW, 0>, VEX_4V;
5664 defm VPHSUBD : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v4i32, VR128,
5666 SSE_PHADDSUBD, 0>, VEX_4V;
5667 defm VPSIGNB : SS3I_binop_rm<0x08, "vpsignb", X86psign, v16i8, VR128,
5669 SSE_PSIGN, 0>, VEX_4V;
5670 defm VPSIGNW : SS3I_binop_rm<0x09, "vpsignw", X86psign, v8i16, VR128,
5672 SSE_PSIGN, 0>, VEX_4V;
5673 defm VPSIGND : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v4i32, VR128,
5675 SSE_PSIGN, 0>, VEX_4V;
5676 defm VPSHUFB : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v16i8, VR128,
5678 SSE_PSHUFB, 0>, VEX_4V;
5679 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw",
5680 int_x86_ssse3_phadd_sw_128,
5681 SSE_PHADDSUBSW, loadv2i64, 0>, VEX_4V;
5682 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw",
5683 int_x86_ssse3_phsub_sw_128,
5684 SSE_PHADDSUBSW, loadv2i64, 0>, VEX_4V;
5685 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw",
5686 int_x86_ssse3_pmadd_ub_sw_128,
5687 SSE_PMADD, loadv2i64, 0>, VEX_4V;
5689 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw",
5690 int_x86_ssse3_pmul_hr_sw_128,
5691 SSE_PMULHRSW, loadv2i64, 0>, VEX_4V;
5694 let ImmT = NoImm, Predicates = [HasAVX2] in {
5695 let isCommutable = 0 in {
5696 defm VPHADDWY : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v16i16, VR256,
5698 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5699 defm VPHADDDY : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v8i32, VR256,
5701 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5702 defm VPHSUBWY : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v16i16, VR256,
5704 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5705 defm VPHSUBDY : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, VR256,
5707 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5708 defm VPSIGNBY : SS3I_binop_rm<0x08, "vpsignb", X86psign, v32i8, VR256,
5710 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5711 defm VPSIGNWY : SS3I_binop_rm<0x09, "vpsignw", X86psign, v16i16, VR256,
5713 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5714 defm VPSIGNDY : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v8i32, VR256,
5716 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5717 defm VPSHUFBY : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, VR256,
5719 SSE_PSHUFB, 0>, VEX_4V, VEX_L;
5720 defm VPHADDSW : SS3I_binop_rm_int_y<0x03, "vphaddsw",
5721 int_x86_avx2_phadd_sw,
5722 WriteVecALU>, VEX_4V, VEX_L;
5723 defm VPHSUBSW : SS3I_binop_rm_int_y<0x07, "vphsubsw",
5724 int_x86_avx2_phsub_sw,
5725 WriteVecALU>, VEX_4V, VEX_L;
5726 defm VPMADDUBSW : SS3I_binop_rm_int_y<0x04, "vpmaddubsw",
5727 int_x86_avx2_pmadd_ub_sw,
5728 WriteVecIMul>, VEX_4V, VEX_L;
5730 defm VPMULHRSW : SS3I_binop_rm_int_y<0x0B, "vpmulhrsw",
5731 int_x86_avx2_pmul_hr_sw,
5732 WriteVecIMul>, VEX_4V, VEX_L;
5735 // None of these have i8 immediate fields.
5736 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
5737 let isCommutable = 0 in {
5738 defm PHADDW : SS3I_binop_rm<0x01, "phaddw", X86hadd, v8i16, VR128,
5739 memopv2i64, i128mem, SSE_PHADDSUBW>;
5740 defm PHADDD : SS3I_binop_rm<0x02, "phaddd", X86hadd, v4i32, VR128,
5741 memopv2i64, i128mem, SSE_PHADDSUBD>;
5742 defm PHSUBW : SS3I_binop_rm<0x05, "phsubw", X86hsub, v8i16, VR128,
5743 memopv2i64, i128mem, SSE_PHADDSUBW>;
5744 defm PHSUBD : SS3I_binop_rm<0x06, "phsubd", X86hsub, v4i32, VR128,
5745 memopv2i64, i128mem, SSE_PHADDSUBD>;
5746 defm PSIGNB : SS3I_binop_rm<0x08, "psignb", X86psign, v16i8, VR128,
5747 memopv2i64, i128mem, SSE_PSIGN>;
5748 defm PSIGNW : SS3I_binop_rm<0x09, "psignw", X86psign, v8i16, VR128,
5749 memopv2i64, i128mem, SSE_PSIGN>;
5750 defm PSIGND : SS3I_binop_rm<0x0A, "psignd", X86psign, v4i32, VR128,
5751 memopv2i64, i128mem, SSE_PSIGN>;
5752 defm PSHUFB : SS3I_binop_rm<0x00, "pshufb", X86pshufb, v16i8, VR128,
5753 memopv2i64, i128mem, SSE_PSHUFB>;
5754 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw",
5755 int_x86_ssse3_phadd_sw_128,
5756 SSE_PHADDSUBSW, memopv2i64>;
5757 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw",
5758 int_x86_ssse3_phsub_sw_128,
5759 SSE_PHADDSUBSW, memopv2i64>;
5760 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw",
5761 int_x86_ssse3_pmadd_ub_sw_128,
5762 SSE_PMADD, memopv2i64>;
5764 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw",
5765 int_x86_ssse3_pmul_hr_sw_128,
5766 SSE_PMULHRSW, memopv2i64>;
5769 //===---------------------------------------------------------------------===//
5770 // SSSE3 - Packed Align Instruction Patterns
5771 //===---------------------------------------------------------------------===//
5773 multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
5774 let hasSideEffects = 0 in {
5775 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
5776 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
5778 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5780 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5781 [], IIC_SSE_PALIGNRR>, Sched<[WriteShuffle]>;
5783 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
5784 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
5786 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5788 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5789 [], IIC_SSE_PALIGNRM>, Sched<[WriteShuffleLd, ReadAfterLd]>;
5793 multiclass ssse3_palignr_y<string asm, bit Is2Addr = 1> {
5794 let hasSideEffects = 0 in {
5795 def R256rr : SS3AI<0x0F, MRMSrcReg, (outs VR256:$dst),
5796 (ins VR256:$src1, VR256:$src2, u8imm:$src3),
5798 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5799 []>, Sched<[WriteShuffle]>;
5801 def R256rm : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst),
5802 (ins VR256:$src1, i256mem:$src2, u8imm:$src3),
5804 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5805 []>, Sched<[WriteShuffleLd, ReadAfterLd]>;
5809 let Predicates = [HasAVX] in
5810 defm VPALIGN : ssse3_palignr<"vpalignr", 0>, VEX_4V;
5811 let Predicates = [HasAVX2] in
5812 defm VPALIGN : ssse3_palignr_y<"vpalignr", 0>, VEX_4V, VEX_L;
5813 let Constraints = "$src1 = $dst", Predicates = [UseSSSE3] in
5814 defm PALIGN : ssse3_palignr<"palignr">;
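// PALIGNR concatenates its two sources (first source high, second low) and
// extracts the result shifted right by $imm bytes.  The X86PAlignr node takes
// its operands in the opposite order, so the patterns below swap $src1/$src2.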
5816 let Predicates = [HasAVX2] in {
5817 def : Pat<(v8i32 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5818 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
5819 def : Pat<(v8f32 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5820 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
5821 def : Pat<(v16i16 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5822 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
5823 def : Pat<(v32i8 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5824 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
5827 let Predicates = [HasAVX] in {
5828 def : Pat<(v4i32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5829 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5830 def : Pat<(v4f32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5831 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5832 def : Pat<(v8i16 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5833 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5834 def : Pat<(v16i8 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5835 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5838 let Predicates = [UseSSSE3] in {
5839 def : Pat<(v4i32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5840 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5841 def : Pat<(v4f32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5842 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5843 def : Pat<(v8i16 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5844 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5845 def : Pat<(v16i8 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5846 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5849 //===---------------------------------------------------------------------===//
5850 // SSE3 - Thread synchronization
5851 //===---------------------------------------------------------------------===//
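// MONITOR arms address-range monitoring using EAX (linear address),
// ECX (extensions) and EDX (hints); MWAIT waits for a write to the monitored
// range, taking extensions in ECX and hints in EAX.  All operands are
// implicit, so the real instructions below take no explicit operands.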
5853 let SchedRW = [WriteSystem] in {
5854 let usesCustomInserter = 1 in {
5855 def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
5856 [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>,
5857 Requires<[HasSSE3]>;
5860 let Uses = [EAX, ECX, EDX] in
5861 def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", [], IIC_SSE_MONITOR>,
5862 TB, Requires<[HasSSE3]>;
5863 let Uses = [ECX, EAX] in
5864 def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait",
5865 [(int_x86_sse3_mwait ECX, EAX)], IIC_SSE_MWAIT>,
5866 TB, Requires<[HasSSE3]>;
5869 def : InstAlias<"mwait\t{%eax, %ecx|ecx, eax}", (MWAITrr)>, Requires<[Not64BitMode]>;
5870 def : InstAlias<"mwait\t{%rax, %rcx|rcx, rax}", (MWAITrr)>, Requires<[In64BitMode]>;
5872 def : InstAlias<"monitor\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORrrr)>,
5873 Requires<[Not64BitMode]>;
5874 def : InstAlias<"monitor\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORrrr)>,
5875 Requires<[In64BitMode]>;
5877 //===----------------------------------------------------------------------===//
5878 // SSE4.1 - Packed Move with Sign/Zero Extend
5879 //===----------------------------------------------------------------------===//
5881 multiclass SS41I_pmovx_rrrm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
5882 RegisterClass OutRC, RegisterClass InRC,
5884 def rr : SS48I<opc, MRMSrcReg, (outs OutRC:$dst), (ins InRC:$src),
5885 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5887 Sched<[itins.Sched]>;
5889 def rm : SS48I<opc, MRMSrcMem, (outs OutRC:$dst), (ins MemOp:$src),
5890 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5892 itins.rm>, Sched<[itins.Sched.Folded]>;
5895 multiclass SS41I_pmovx_rm_all<bits<8> opc, string OpcodeStr,
5896 X86MemOperand MemOp, X86MemOperand MemYOp,
5897 OpndItins SSEItins, OpndItins AVXItins,
5898 OpndItins AVX2Itins> {
5899 defm NAME : SS41I_pmovx_rrrm<opc, OpcodeStr, MemOp, VR128, VR128, SSEItins>;
5900 let Predicates = [HasAVX] in
5901 defm V#NAME : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemOp,
5902 VR128, VR128, AVXItins>, VEX;
5903 let Predicates = [HasAVX2] in
5904 defm V#NAME#Y : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemYOp,
5905 VR256, VR128, AVX2Itins>, VEX, VEX_L;
5908 multiclass SS41I_pmovx_rm<bits<8> opc, string OpcodeStr,
5909 X86MemOperand MemOp, X86MemOperand MemYOp> {
5910 defm PMOVSX#NAME : SS41I_pmovx_rm_all<opc, !strconcat("pmovsx", OpcodeStr),
5912 SSE_INTALU_ITINS_SHUFF_P,
5913 DEFAULT_ITINS_SHUFFLESCHED,
5914 DEFAULT_ITINS_SHUFFLESCHED>;
5915 defm PMOVZX#NAME : SS41I_pmovx_rm_all<!add(opc, 0x10),
5916 !strconcat("pmovzx", OpcodeStr),
5918 SSE_INTALU_ITINS_SHUFF_P,
5919 DEFAULT_ITINS_SHUFFLESCHED,
5920 DEFAULT_ITINS_SHUFFLESCHED>;
5923 defm BW : SS41I_pmovx_rm<0x20, "bw", i64mem, i128mem>;
5924 defm WD : SS41I_pmovx_rm<0x23, "wd", i64mem, i128mem>;
5925 defm DQ : SS41I_pmovx_rm<0x25, "dq", i64mem, i128mem>;
5927 defm BD : SS41I_pmovx_rm<0x21, "bd", i32mem, i64mem>;
5928 defm WQ : SS41I_pmovx_rm<0x24, "wq", i32mem, i64mem>;
5930 defm BQ : SS41I_pmovx_rm<0x22, "bq", i16mem, i32mem>;
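// The memory forms load only as many source bits as the extension consumes:
// 64 bits for bw/wd/dq, 32 bits for bd/wq and 16 bits for bq in the 128-bit
// variants, and twice that for the 256-bit (Y) variants.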
5933 multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy, SDNode ExtOp> {
5934 // Register-Register patterns
5935 def : Pat<(v16i16 (ExtOp (v16i8 VR128:$src))),
5936 (!cast<I>(OpcPrefix#BWYrr) VR128:$src)>;
5937 def : Pat<(v8i32 (ExtOp (v16i8 VR128:$src))),
5938 (!cast<I>(OpcPrefix#BDYrr) VR128:$src)>;
5939 def : Pat<(v4i64 (ExtOp (v16i8 VR128:$src))),
5940 (!cast<I>(OpcPrefix#BQYrr) VR128:$src)>;
5942 def : Pat<(v8i32 (ExtOp (v8i16 VR128:$src))),
5943 (!cast<I>(OpcPrefix#WDYrr) VR128:$src)>;
5944 def : Pat<(v4i64 (ExtOp (v8i16 VR128:$src))),
5945 (!cast<I>(OpcPrefix#WQYrr) VR128:$src)>;
5947 def : Pat<(v4i64 (ExtOp (v4i32 VR128:$src))),
5948 (!cast<I>(OpcPrefix#DQYrr) VR128:$src)>;
5950 // On AVX2, we also support 256-bit inputs.
5951 // FIXME: remove these patterns when the old shuffle lowering goes away.
5952 def : Pat<(v16i16 (ExtOp (v32i8 VR256:$src))),
5953 (!cast<I>(OpcPrefix#BWYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5954 def : Pat<(v8i32 (ExtOp (v32i8 VR256:$src))),
5955 (!cast<I>(OpcPrefix#BDYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5956 def : Pat<(v4i64 (ExtOp (v32i8 VR256:$src))),
5957 (!cast<I>(OpcPrefix#BQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5959 def : Pat<(v8i32 (ExtOp (v16i16 VR256:$src))),
5960 (!cast<I>(OpcPrefix#WDYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5961 def : Pat<(v4i64 (ExtOp (v16i16 VR256:$src))),
5962 (!cast<I>(OpcPrefix#WQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5964 def : Pat<(v4i64 (ExtOp (v8i32 VR256:$src))),
5965 (!cast<I>(OpcPrefix#DQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5967 // Simple Register-Memory patterns
5968 def : Pat<(v16i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
5969 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
5970 def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
5971 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
5972 def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
5973 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
5975 def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
5976 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
5977 def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
5978 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
5980 def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
5981 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
5983 // AVX2 Register-Memory patterns
5984 def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
5985 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
5986 def : Pat<(v16i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
5987 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
5988 def : Pat<(v16i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
5989 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
5990 def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
5991 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
5993 def : Pat<(v8i32 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
5994 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
5995 def : Pat<(v8i32 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
5996 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
5997 def : Pat<(v8i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
5998 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
5999 def : Pat<(v8i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6000 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
6002 def : Pat<(v4i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
6003 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
6004 def : Pat<(v4i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
6005 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
6006 def : Pat<(v4i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6007 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
6008 def : Pat<(v4i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6009 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
6011 def : Pat<(v8i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6012 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
6013 def : Pat<(v8i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
6014 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
6015 def : Pat<(v8i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
6016 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
6017 def : Pat<(v8i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6018 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
6020 def : Pat<(v4i64 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6021 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
6022 def : Pat<(v4i64 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
6023 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
6024 def : Pat<(v4i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
6025 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
6026 def : Pat<(v4i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6027 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
6029 def : Pat<(v4i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
6030 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
6031 def : Pat<(v4i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
6032 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
6033 def : Pat<(v4i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
6034 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
6035 def : Pat<(v4i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
6036 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
6039 let Predicates = [HasAVX2] in {
6040 defm : SS41I_pmovx_avx2_patterns<"VPMOVSX", "s", X86vsext>;
6041 defm : SS41I_pmovx_avx2_patterns<"VPMOVZX", "z", X86vzext>;
6044 // SSE4.1/AVX patterns.
6045 multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
6046 SDNode ExtOp, PatFrag ExtLoad16> {
6047 def : Pat<(v8i16 (ExtOp (v16i8 VR128:$src))),
6048 (!cast<I>(OpcPrefix#BWrr) VR128:$src)>;
6049 def : Pat<(v4i32 (ExtOp (v16i8 VR128:$src))),
6050 (!cast<I>(OpcPrefix#BDrr) VR128:$src)>;
6051 def : Pat<(v2i64 (ExtOp (v16i8 VR128:$src))),
6052 (!cast<I>(OpcPrefix#BQrr) VR128:$src)>;
6054 def : Pat<(v4i32 (ExtOp (v8i16 VR128:$src))),
6055 (!cast<I>(OpcPrefix#WDrr) VR128:$src)>;
6056 def : Pat<(v2i64 (ExtOp (v8i16 VR128:$src))),
6057 (!cast<I>(OpcPrefix#WQrr) VR128:$src)>;
6059 def : Pat<(v2i64 (ExtOp (v4i32 VR128:$src))),
6060 (!cast<I>(OpcPrefix#DQrr) VR128:$src)>;
6062 def : Pat<(v8i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
6063 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6064 def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
6065 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6066 def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
6067 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6069 def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
6070 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6071 def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
6072 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6074 def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
6075 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6077 def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6078 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6079 def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
6080 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6081 def : Pat<(v8i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
6082 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6083 def : Pat<(v8i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6084 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6085 def : Pat<(v8i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6086 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6088 def : Pat<(v4i32 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
6089 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6090 def : Pat<(v4i32 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
6091 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6092 def : Pat<(v4i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6093 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6094 def : Pat<(v4i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6095 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6097 def : Pat<(v2i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (ExtLoad16 addr:$src)))))),
6098 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6099 def : Pat<(v2i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
6100 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6101 def : Pat<(v2i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6102 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6103 def : Pat<(v2i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6104 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6106 def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6107 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6108 def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
6109 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6110 def : Pat<(v4i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
6111 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6112 def : Pat<(v4i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
6113 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6114 def : Pat<(v4i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6115 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6117 def : Pat<(v2i64 (ExtOp (bc_v8i16 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
6118 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6119 def : Pat<(v2i64 (ExtOp (v8i16 (vzmovl_v4i32 addr:$src)))),
6120 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6121 def : Pat<(v2i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
6122 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6123 def : Pat<(v2i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6124 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6126 def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6127 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6128 def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
6129 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6130 def : Pat<(v2i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
6131 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6132 def : Pat<(v2i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
6133 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6134 def : Pat<(v2i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
6135 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6138 let Predicates = [HasAVX] in {
6139 defm : SS41I_pmovx_patterns<"VPMOVSX", "s", X86vsext, extloadi32i16>;
6140 defm : SS41I_pmovx_patterns<"VPMOVZX", "z", X86vzext, loadi16_anyext>;
6143 let Predicates = [UseSSE41] in {
6144 defm : SS41I_pmovx_patterns<"PMOVSX", "s", X86vsext, extloadi32i16>;
6145 defm : SS41I_pmovx_patterns<"PMOVZX", "z", X86vzext, loadi16_anyext>;
6148 //===----------------------------------------------------------------------===//
6149 // SSE4.1 - Extract Instructions
6150 //===----------------------------------------------------------------------===//
6152 /// SS41I_extract8 - SSE 4.1 extract 8 bits to a 32-bit reg or 8-bit mem
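/// The register form zero-extends the selected byte into a 32-bit (or 64-bit)
/// GPR; the memory form stores only the selected byte.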
6153 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
6154 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
6155 (ins VR128:$src1, u8imm:$src2),
6156 !strconcat(OpcodeStr,
6157 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6158 [(set GR32orGR64:$dst, (X86pextrb (v16i8 VR128:$src1),
6160 Sched<[WriteShuffle]>;
6161 let hasSideEffects = 0, mayStore = 1,
6162 SchedRW = [WriteShuffleLd, WriteRMW] in
6163 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6164 (ins i8mem:$dst, VR128:$src1, u8imm:$src2),
6165 !strconcat(OpcodeStr,
6166 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6167 [(store (i8 (trunc (assertzext (X86pextrb (v16i8 VR128:$src1),
6168 imm:$src2)))), addr:$dst)]>;
6171 let Predicates = [HasAVX] in
6172 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
6174 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
6177 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
6178 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
6179 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
6180 def rr_REV : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
6181 (ins VR128:$src1, u8imm:$src2),
6182 !strconcat(OpcodeStr,
6183 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6184 []>, Sched<[WriteShuffle]>;
6186 let hasSideEffects = 0, mayStore = 1,
6187 SchedRW = [WriteShuffleLd, WriteRMW] in
6188 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6189 (ins i16mem:$dst, VR128:$src1, u8imm:$src2),
6190 !strconcat(OpcodeStr,
6191 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6192 [(store (i16 (trunc (assertzext (X86pextrw (v8i16 VR128:$src1),
6193 imm:$src2)))), addr:$dst)]>;
6196 let Predicates = [HasAVX] in
6197 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
6199 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
6202 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
6203 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
6204 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
6205 (ins VR128:$src1, u8imm:$src2),
6206 !strconcat(OpcodeStr,
6207 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6209 (extractelt (v4i32 VR128:$src1), imm:$src2))]>,
6210 Sched<[WriteShuffle]>;
6211 let SchedRW = [WriteShuffleLd, WriteRMW] in
6212 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6213 (ins i32mem:$dst, VR128:$src1, u8imm:$src2),
6214 !strconcat(OpcodeStr,
6215 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6216 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
6220 let Predicates = [HasAVX] in
6221 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
6223 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
6225 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
6226 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
6227 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
6228 (ins VR128:$src1, u8imm:$src2),
6229 !strconcat(OpcodeStr,
6230 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6232 (extractelt (v2i64 VR128:$src1), imm:$src2))]>,
6233 Sched<[WriteShuffle]>, REX_W;
6234 let SchedRW = [WriteShuffleLd, WriteRMW] in
6235 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6236 (ins i64mem:$dst, VR128:$src1, u8imm:$src2),
6237 !strconcat(OpcodeStr,
6238 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6239 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
6240 addr:$dst)]>, REX_W;
6243 let Predicates = [HasAVX] in
6244 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
6246 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
6248 /// SS41I_extractf32 - SSE 4.1 extract a 32-bit fp value to int reg or memory destination
6250 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr,
6251 OpndItins itins = DEFAULT_ITINS> {
6252 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
6253 (ins VR128:$src1, u8imm:$src2),
6254 !strconcat(OpcodeStr,
6255 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6256 [(set GR32orGR64:$dst,
6257 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))],
6258 itins.rr>, Sched<[WriteFBlend]>;
6259 let SchedRW = [WriteFBlendLd, WriteRMW] in
6260 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6261 (ins f32mem:$dst, VR128:$src1, u8imm:$src2),
6262 !strconcat(OpcodeStr,
6263 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6264 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
6265 addr:$dst)], itins.rm>;
6268 let ExeDomain = SSEPackedSingle in {
6269 let Predicates = [UseAVX] in
6270 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
6271 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps", SSE_EXTRACT_ITINS>;
6274 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
6275 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
6278 (VEXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
6280 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
6283 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
6284 Requires<[UseSSE41]>;
6286 //===----------------------------------------------------------------------===//
6287 // SSE4.1 - Insert Instructions
6288 //===----------------------------------------------------------------------===//
6290 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
6291 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6292 (ins VR128:$src1, GR32orGR64:$src2, u8imm:$src3),
6294 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6296 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6298 (X86pinsrb VR128:$src1, GR32orGR64:$src2, imm:$src3))]>,
6299 Sched<[WriteShuffle]>;
6300 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6301 (ins VR128:$src1, i8mem:$src2, u8imm:$src3),
6303 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6305 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6307 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
6308 imm:$src3))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
6311 let Predicates = [HasAVX] in
6312 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
6313 let Constraints = "$src1 = $dst" in
6314 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
6316 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
6317 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6318 (ins VR128:$src1, GR32:$src2, u8imm:$src3),
6320 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6322 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6324 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
6325 Sched<[WriteShuffle]>;
6326 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6327 (ins VR128:$src1, i32mem:$src2, u8imm:$src3),
6329 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6331 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6333 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
6334 imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
6337 let Predicates = [HasAVX] in
6338 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
6339 let Constraints = "$src1 = $dst" in
6340 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
6342 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
6343 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6344 (ins VR128:$src1, GR64:$src2, u8imm:$src3),
6346 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6348 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6350 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
6351 Sched<[WriteShuffle]>;
6352 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6353 (ins VR128:$src1, i64mem:$src2, u8imm:$src3),
6355 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6357 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6359 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
6360 imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
6363 let Predicates = [HasAVX] in
6364 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
6365 let Constraints = "$src1 = $dst" in
6366 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
6368 // insertps has a few different modes. The first two below are optimized
6369 // inserts that won't zero arbitrary elements in the destination vector; the
6370 // next one matches the intrinsic and can zero arbitrary elements in the
6371 // target vector.
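// The insertps immediate encodes the whole operation: bits [7:6] select the
// source element (COUNT_S, only meaningful for a register source), bits [5:4]
// select the destination element (COUNT_D), and bits [3:0] form a zero mask
// applied to the result (ZMASK).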
6372 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1,
6373 OpndItins itins = DEFAULT_ITINS> {
6374 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6375 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
6377 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6379 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6381 (X86insertps VR128:$src1, VR128:$src2, imm:$src3))], itins.rr>,
6382 Sched<[WriteFShuffle]>;
6383 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6384 (ins VR128:$src1, f32mem:$src2, u8imm:$src3),
6386 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6388 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6390 (X86insertps VR128:$src1,
6391 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
6392 imm:$src3))], itins.rm>,
6393 Sched<[WriteFShuffleLd, ReadAfterLd]>;
6396 let ExeDomain = SSEPackedSingle in {
6397 let Predicates = [UseAVX] in
6398 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
6399 let Constraints = "$src1 = $dst" in
6400 defm INSERTPS : SS41I_insertf32<0x21, "insertps", 1, SSE_INSERT_ITINS>;
6403 let Predicates = [UseSSE41] in {
6404 // If we're inserting an element from a load or a null pshuf of a load,
6405 // fold the load into the insertps instruction.
6406 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1), (X86PShufd (v4f32
6407 (scalar_to_vector (loadf32 addr:$src2))), (i8 0)),
6409 (INSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
6410 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1), (X86PShufd
6411 (loadv4f32 addr:$src2), (i8 0)), imm:$src3)),
6412 (INSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
6415 let Predicates = [UseAVX] in {
6416 // If we're inserting an element from a vbroadcast of a load, fold the
6417 // load into the X86insertps instruction.
6418 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1),
6419 (X86VBroadcast (loadf32 addr:$src2)), imm:$src3)),
6420 (VINSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
6421 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1),
6422 (X86VBroadcast (loadv4f32 addr:$src2)), imm:$src3)),
6423 (VINSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
6426 //===----------------------------------------------------------------------===//
6427 // SSE4.1 - Round Instructions
6428 //===----------------------------------------------------------------------===//
6430 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
6431 X86MemOperand x86memop, RegisterClass RC,
6432 PatFrag mem_frag32, PatFrag mem_frag64,
6433 Intrinsic V4F32Int, Intrinsic V2F64Int> {
6434 let ExeDomain = SSEPackedSingle in {
6435 // Intrinsic operation, reg.
6436 // Vector intrinsic operation, reg
6437 def PSr : SS4AIi8<opcps, MRMSrcReg,
6438 (outs RC:$dst), (ins RC:$src1, i32u8imm:$src2),
6439 !strconcat(OpcodeStr,
6440 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6441 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))],
6442 IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAdd]>;
6444 // Vector intrinsic operation, mem
6445 def PSm : SS4AIi8<opcps, MRMSrcMem,
6446 (outs RC:$dst), (ins x86memop:$src1, i32u8imm:$src2),
6447 !strconcat(OpcodeStr,
6448 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6450 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))],
6451 IIC_SSE_ROUNDPS_MEM>, Sched<[WriteFAddLd]>;
6452 } // ExeDomain = SSEPackedSingle
6454 let ExeDomain = SSEPackedDouble in {
6455 // Vector intrinsic operation, reg
6456 def PDr : SS4AIi8<opcpd, MRMSrcReg,
6457 (outs RC:$dst), (ins RC:$src1, i32u8imm:$src2),
6458 !strconcat(OpcodeStr,
6459 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6460 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))],
6461 IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAdd]>;
6463 // Vector intrinsic operation, mem
6464 def PDm : SS4AIi8<opcpd, MRMSrcMem,
6465 (outs RC:$dst), (ins x86memop:$src1, i32u8imm:$src2),
6466 !strconcat(OpcodeStr,
6467 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6469 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))],
6470 IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAddLd]>;
6471 } // ExeDomain = SSEPackedDouble
6474 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
6477 Intrinsic F64Int, bit Is2Addr = 1> {
6478 let ExeDomain = GenericDomain in {
6480 let hasSideEffects = 0 in
6481 def SSr : SS4AIi8<opcss, MRMSrcReg,
6482 (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, i32u8imm:$src3),
6484 !strconcat(OpcodeStr,
6485 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6486 !strconcat(OpcodeStr,
6487 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6488 []>, Sched<[WriteFAdd]>;
6490 // Intrinsic operation, reg.
6491 let isCodeGenOnly = 1 in
6492 def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
6493 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32u8imm:$src3),
6495 !strconcat(OpcodeStr,
6496 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6497 !strconcat(OpcodeStr,
6498 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6499 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
6502 // Intrinsic operation, mem.
6503 def SSm : SS4AIi8<opcss, MRMSrcMem,
6504 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32u8imm:$src3),
6506 !strconcat(OpcodeStr,
6507 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6508 !strconcat(OpcodeStr,
6509 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6511 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
6512 Sched<[WriteFAddLd, ReadAfterLd]>;
6515 let hasSideEffects = 0 in
6516 def SDr : SS4AIi8<opcsd, MRMSrcReg,
6517 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, i32u8imm:$src3),
6519 !strconcat(OpcodeStr,
6520 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6521 !strconcat(OpcodeStr,
6522 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6523 []>, Sched<[WriteFAdd]>;
6525 // Intrinsic operation, reg.
6526 let isCodeGenOnly = 1 in
6527 def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
6528 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32u8imm:$src3),
6530 !strconcat(OpcodeStr,
6531 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6532 !strconcat(OpcodeStr,
6533 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6534 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
6537 // Intrinsic operation, mem.
6538 def SDm : SS4AIi8<opcsd, MRMSrcMem,
6539 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32u8imm:$src3),
6541 !strconcat(OpcodeStr,
6542 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6543 !strconcat(OpcodeStr,
6544 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6546 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
6547 Sched<[WriteFAddLd, ReadAfterLd]>;
6548 } // ExeDomain = GenericDomain
6551 // FP round - roundss, roundps, roundsd, roundpd
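// The rounding-control immediate works as follows: if bit 2 is set the current
// MXCSR rounding mode is used, otherwise bits [1:0] select it (00 = nearest,
// 01 = down, 10 = up, 11 = truncate); bit 3 suppresses the precision
// exception.  Hence the patterns below use ffloor -> 0x1, fceil -> 0x2,
// ftrunc -> 0x3, frint -> 0x4 and fnearbyint -> 0xC.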
6552 let Predicates = [HasAVX] in {
6554 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
6555 loadv4f32, loadv2f64,
6556 int_x86_sse41_round_ps,
6557 int_x86_sse41_round_pd>, VEX;
6558 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
6559 loadv8f32, loadv4f64,
6560 int_x86_avx_round_ps_256,
6561 int_x86_avx_round_pd_256>, VEX, VEX_L;
6562 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
6563 int_x86_sse41_round_ss,
6564 int_x86_sse41_round_sd, 0>, VEX_4V, VEX_LIG;
6567 let Predicates = [UseAVX] in {
6568 def : Pat<(ffloor FR32:$src),
6569 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>;
6570 def : Pat<(f64 (ffloor FR64:$src)),
6571 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>;
6572 def : Pat<(f32 (fnearbyint FR32:$src)),
6573 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
6574 def : Pat<(f64 (fnearbyint FR64:$src)),
6575 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
6576 def : Pat<(f32 (fceil FR32:$src)),
6577 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>;
6578 def : Pat<(f64 (fceil FR64:$src)),
6579 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>;
6580 def : Pat<(f32 (frint FR32:$src)),
6581 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
6582 def : Pat<(f64 (frint FR64:$src)),
6583 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
6584 def : Pat<(f32 (ftrunc FR32:$src)),
6585 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
6586 def : Pat<(f64 (ftrunc FR64:$src)),
6587 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
6590 let Predicates = [HasAVX] in {
6591 def : Pat<(v4f32 (ffloor VR128:$src)),
6592 (VROUNDPSr VR128:$src, (i32 0x1))>;
6593 def : Pat<(v4f32 (fnearbyint VR128:$src)),
6594 (VROUNDPSr VR128:$src, (i32 0xC))>;
6595 def : Pat<(v4f32 (fceil VR128:$src)),
6596 (VROUNDPSr VR128:$src, (i32 0x2))>;
6597 def : Pat<(v4f32 (frint VR128:$src)),
6598 (VROUNDPSr VR128:$src, (i32 0x4))>;
6599 def : Pat<(v4f32 (ftrunc VR128:$src)),
6600 (VROUNDPSr VR128:$src, (i32 0x3))>;
6602 def : Pat<(v2f64 (ffloor VR128:$src)),
6603 (VROUNDPDr VR128:$src, (i32 0x1))>;
6604 def : Pat<(v2f64 (fnearbyint VR128:$src)),
6605 (VROUNDPDr VR128:$src, (i32 0xC))>;
6606 def : Pat<(v2f64 (fceil VR128:$src)),
6607 (VROUNDPDr VR128:$src, (i32 0x2))>;
6608 def : Pat<(v2f64 (frint VR128:$src)),
6609 (VROUNDPDr VR128:$src, (i32 0x4))>;
6610 def : Pat<(v2f64 (ftrunc VR128:$src)),
6611 (VROUNDPDr VR128:$src, (i32 0x3))>;
6613 def : Pat<(v8f32 (ffloor VR256:$src)),
6614 (VROUNDYPSr VR256:$src, (i32 0x1))>;
6615 def : Pat<(v8f32 (fnearbyint VR256:$src)),
6616 (VROUNDYPSr VR256:$src, (i32 0xC))>;
6617 def : Pat<(v8f32 (fceil VR256:$src)),
6618 (VROUNDYPSr VR256:$src, (i32 0x2))>;
6619 def : Pat<(v8f32 (frint VR256:$src)),
6620 (VROUNDYPSr VR256:$src, (i32 0x4))>;
6621 def : Pat<(v8f32 (ftrunc VR256:$src)),
6622 (VROUNDYPSr VR256:$src, (i32 0x3))>;
6624 def : Pat<(v4f64 (ffloor VR256:$src)),
6625 (VROUNDYPDr VR256:$src, (i32 0x1))>;
6626 def : Pat<(v4f64 (fnearbyint VR256:$src)),
6627 (VROUNDYPDr VR256:$src, (i32 0xC))>;
6628 def : Pat<(v4f64 (fceil VR256:$src)),
6629 (VROUNDYPDr VR256:$src, (i32 0x2))>;
6630 def : Pat<(v4f64 (frint VR256:$src)),
6631 (VROUNDYPDr VR256:$src, (i32 0x4))>;
6632 def : Pat<(v4f64 (ftrunc VR256:$src)),
6633 (VROUNDYPDr VR256:$src, (i32 0x3))>;
6636 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
6637 memopv4f32, memopv2f64,
6638 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
6639 let Constraints = "$src1 = $dst" in
6640 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
6641 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
6643 let Predicates = [UseSSE41] in {
6644 def : Pat<(ffloor FR32:$src),
6645 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>;
6646 def : Pat<(f64 (ffloor FR64:$src)),
6647 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>;
6648 def : Pat<(f32 (fnearbyint FR32:$src)),
6649 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
6650 def : Pat<(f64 (fnearbyint FR64:$src)),
6651 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
6652 def : Pat<(f32 (fceil FR32:$src)),
6653 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>;
6654 def : Pat<(f64 (fceil FR64:$src)),
6655 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>;
6656 def : Pat<(f32 (frint FR32:$src)),
6657 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
6658 def : Pat<(f64 (frint FR64:$src)),
6659 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
6660 def : Pat<(f32 (ftrunc FR32:$src)),
6661 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
6662 def : Pat<(f64 (ftrunc FR64:$src)),
6663 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
6665 def : Pat<(v4f32 (ffloor VR128:$src)),
6666 (ROUNDPSr VR128:$src, (i32 0x1))>;
6667 def : Pat<(v4f32 (fnearbyint VR128:$src)),
6668 (ROUNDPSr VR128:$src, (i32 0xC))>;
6669 def : Pat<(v4f32 (fceil VR128:$src)),
6670 (ROUNDPSr VR128:$src, (i32 0x2))>;
6671 def : Pat<(v4f32 (frint VR128:$src)),
6672 (ROUNDPSr VR128:$src, (i32 0x4))>;
6673 def : Pat<(v4f32 (ftrunc VR128:$src)),
6674 (ROUNDPSr VR128:$src, (i32 0x3))>;
6676 def : Pat<(v2f64 (ffloor VR128:$src)),
6677 (ROUNDPDr VR128:$src, (i32 0x1))>;
6678 def : Pat<(v2f64 (fnearbyint VR128:$src)),
6679 (ROUNDPDr VR128:$src, (i32 0xC))>;
6680 def : Pat<(v2f64 (fceil VR128:$src)),
6681 (ROUNDPDr VR128:$src, (i32 0x2))>;
6682 def : Pat<(v2f64 (frint VR128:$src)),
6683 (ROUNDPDr VR128:$src, (i32 0x4))>;
6684 def : Pat<(v2f64 (ftrunc VR128:$src)),
6685 (ROUNDPDr VR128:$src, (i32 0x3))>;
6688 //===----------------------------------------------------------------------===//
6689 // SSE4.1 - Packed Bit Test
6690 //===----------------------------------------------------------------------===//
6692 // ptest: we lower to this in X86ISelLowering, primarily from the Intel
6693 // intrinsic that corresponds to it.
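// PTEST sets ZF if (src2 AND src1) is all zeroes, sets CF if
// (src2 AND NOT src1) is all zeroes, and clears the remaining status flags.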
6694 let Defs = [EFLAGS], Predicates = [HasAVX] in {
6695 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
6696 "vptest\t{$src2, $src1|$src1, $src2}",
6697 [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
6698 Sched<[WriteVecLogic]>, VEX;
6699 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
6700 "vptest\t{$src2, $src1|$src1, $src2}",
6701 [(set EFLAGS,(X86ptest VR128:$src1, (loadv2i64 addr:$src2)))]>,
6702 Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX;
6704 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
6705 "vptest\t{$src2, $src1|$src1, $src2}",
6706 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
6707 Sched<[WriteVecLogic]>, VEX, VEX_L;
6708 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
6709 "vptest\t{$src2, $src1|$src1, $src2}",
6710 [(set EFLAGS,(X86ptest VR256:$src1, (loadv4i64 addr:$src2)))]>,
6711 Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX, VEX_L;
6714 let Defs = [EFLAGS] in {
6715 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
6716 "ptest\t{$src2, $src1|$src1, $src2}",
6717 [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
6718 Sched<[WriteVecLogic]>;
6719 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
6720 "ptest\t{$src2, $src1|$src1, $src2}",
6721 [(set EFLAGS, (X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
6722 Sched<[WriteVecLogicLd, ReadAfterLd]>;
6725 // The bit test instructions below are AVX only
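// VTESTPS/VTESTPD update EFLAGS exactly like PTEST, but only the sign bit of
// each packed single/double-precision element takes part in the test.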
6726 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
6727 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
6728 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
6729 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
6730 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>,
6731 Sched<[WriteVecLogic]>, VEX;
6732 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
6733 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
6734 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
6735 Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX;
6738 let Defs = [EFLAGS], Predicates = [HasAVX] in {
6739 let ExeDomain = SSEPackedSingle in {
6740 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, loadv4f32, v4f32>;
6741 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, loadv8f32, v8f32>,
6744 let ExeDomain = SSEPackedDouble in {
6745 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, loadv2f64, v2f64>;
6746 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, loadv4f64, v4f64>,
6751 //===----------------------------------------------------------------------===//
6752 // SSE4.1 - Misc Instructions
6753 //===----------------------------------------------------------------------===//
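// POPCNT counts the set bits of its source and defines EFLAGS: ZF is set when
// the result is zero and OF, SF, AF, CF and PF are cleared, hence the
// (implicit EFLAGS) in the patterns below.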
6755 let Defs = [EFLAGS], Predicates = [HasPOPCNT] in {
6756 def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
6757 "popcnt{w}\t{$src, $dst|$dst, $src}",
6758 [(set GR16:$dst, (ctpop GR16:$src)), (implicit EFLAGS)],
6759 IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>,
6761 def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
6762 "popcnt{w}\t{$src, $dst|$dst, $src}",
6763 [(set GR16:$dst, (ctpop (loadi16 addr:$src))),
6764 (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
6765 Sched<[WriteFAddLd]>, OpSize16, XS;
6767 def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
6768 "popcnt{l}\t{$src, $dst|$dst, $src}",
6769 [(set GR32:$dst, (ctpop GR32:$src)), (implicit EFLAGS)],
6770 IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>,
6773 def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
6774 "popcnt{l}\t{$src, $dst|$dst, $src}",
6775 [(set GR32:$dst, (ctpop (loadi32 addr:$src))),
6776 (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
6777 Sched<[WriteFAddLd]>, OpSize32, XS;
6779 def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
6780 "popcnt{q}\t{$src, $dst|$dst, $src}",
6781 [(set GR64:$dst, (ctpop GR64:$src)), (implicit EFLAGS)],
6782 IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>, XS;
6783 def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
6784 "popcnt{q}\t{$src, $dst|$dst, $src}",
6785 [(set GR64:$dst, (ctpop (loadi64 addr:$src))),
6786 (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
6787 Sched<[WriteFAddLd]>, XS;
6792 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
6793 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
6794 Intrinsic IntId128, PatFrag ld_frag,
6795 X86FoldableSchedWrite Sched> {
6796 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
6798 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
6799 [(set VR128:$dst, (IntId128 VR128:$src))]>,
6801 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
6803 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
6805 (IntId128 (bitconvert (ld_frag addr:$src))))]>,
6806 Sched<[Sched.Folded]>;
6809 // PHMIN has the same profile as PSAD, thus we use the same scheduling
6810 // model, although the naming is misleading.
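// PHMINPOSUW returns the minimum unsigned 16-bit element of the source in the
// low word of the destination, its index in bits [18:16], and zeroes the rest.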
6811 let Predicates = [HasAVX] in
6812 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
6813 int_x86_sse41_phminposuw, loadv2i64,
6815 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
6816 int_x86_sse41_phminposuw, memopv2i64,
6819 /// SS48I_binop_rm - Simple SSE41 binary operator.
6820 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
6821 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
6822 X86MemOperand x86memop, bit Is2Addr = 1,
6823 OpndItins itins = SSE_INTALU_ITINS_P> {
6824 let isCommutable = 1 in
6825 def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
6826 (ins RC:$src1, RC:$src2),
6828 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6829 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6830 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
6831 Sched<[itins.Sched]>;
6832 def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
6833 (ins RC:$src1, x86memop:$src2),
6835 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6836 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6838 (OpVT (OpNode RC:$src1, (bitconvert (memop_frag addr:$src2)))))]>,
6839 Sched<[itins.Sched.Folded, ReadAfterLd]>;
6842 /// SS48I_binop_rm2 - Simple SSE41 binary operator with different src and dst
6843 /// types.
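/// For example, pmuldq multiplies the even-numbered signed 32-bit elements of
/// its v4i32 sources into full 64-bit products, producing a v2i64 result.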
6844 multiclass SS48I_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
6845 ValueType DstVT, ValueType SrcVT, RegisterClass RC,
6846 PatFrag memop_frag, X86MemOperand x86memop,
6848 bit IsCommutable = 0, bit Is2Addr = 1> {
6849 let isCommutable = IsCommutable in
6850 def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
6851 (ins RC:$src1, RC:$src2),
6853 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6854 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6855 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>,
6856 Sched<[itins.Sched]>;
6857 def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
6858 (ins RC:$src1, x86memop:$src2),
6860 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6861 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6862 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
6863 (bitconvert (memop_frag addr:$src2)))))]>,
6864 Sched<[itins.Sched.Folded, ReadAfterLd]>;
6867 let Predicates = [HasAVX, NoVLX] in {
6868 let isCommutable = 0 in
6869 defm VPMINSB : SS48I_binop_rm<0x38, "vpminsb", X86smin, v16i8, VR128,
6870 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6872 defm VPMINSD : SS48I_binop_rm<0x39, "vpminsd", X86smin, v4i32, VR128,
6873 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6875 defm VPMINUD : SS48I_binop_rm<0x3B, "vpminud", X86umin, v4i32, VR128,
6876 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6878 defm VPMINUW : SS48I_binop_rm<0x3A, "vpminuw", X86umin, v8i16, VR128,
6879 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6881 defm VPMAXSB : SS48I_binop_rm<0x3C, "vpmaxsb", X86smax, v16i8, VR128,
6882 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6884 defm VPMAXSD : SS48I_binop_rm<0x3D, "vpmaxsd", X86smax, v4i32, VR128,
6885 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6887 defm VPMAXUD : SS48I_binop_rm<0x3F, "vpmaxud", X86umax, v4i32, VR128,
6888 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6890 defm VPMAXUW : SS48I_binop_rm<0x3E, "vpmaxuw", X86umax, v8i16, VR128,
6891 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6893 defm VPMULDQ : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v2i64, v4i32,
6894 VR128, loadv2i64, i128mem,
6895 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
6898 let Predicates = [HasAVX2, NoVLX] in {
6899 let isCommutable = 0 in
6900 defm VPMINSBY : SS48I_binop_rm<0x38, "vpminsb", X86smin, v32i8, VR256,
6901 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6903 defm VPMINSDY : SS48I_binop_rm<0x39, "vpminsd", X86smin, v8i32, VR256,
6904 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6906 defm VPMINUDY : SS48I_binop_rm<0x3B, "vpminud", X86umin, v8i32, VR256,
6907 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6909 defm VPMINUWY : SS48I_binop_rm<0x3A, "vpminuw", X86umin, v16i16, VR256,
6910 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6912 defm VPMAXSBY : SS48I_binop_rm<0x3C, "vpmaxsb", X86smax, v32i8, VR256,
6913 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6915 defm VPMAXSDY : SS48I_binop_rm<0x3D, "vpmaxsd", X86smax, v8i32, VR256,
6916 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6918 defm VPMAXUDY : SS48I_binop_rm<0x3F, "vpmaxud", X86umax, v8i32, VR256,
6919 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6921 defm VPMAXUWY : SS48I_binop_rm<0x3E, "vpmaxuw", X86umax, v16i16, VR256,
6922 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6924 defm VPMULDQY : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v4i64, v8i32,
6925 VR256, loadv4i64, i256mem,
6926 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
6929 let Constraints = "$src1 = $dst" in {
6930 let isCommutable = 0 in
6931 defm PMINSB : SS48I_binop_rm<0x38, "pminsb", X86smin, v16i8, VR128,
6932 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
6933 defm PMINSD : SS48I_binop_rm<0x39, "pminsd", X86smin, v4i32, VR128,
6934 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
6935 defm PMINUD : SS48I_binop_rm<0x3B, "pminud", X86umin, v4i32, VR128,
6936 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
6937 defm PMINUW : SS48I_binop_rm<0x3A, "pminuw", X86umin, v8i16, VR128,
6938 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
6939 defm PMAXSB : SS48I_binop_rm<0x3C, "pmaxsb", X86smax, v16i8, VR128,
6940 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
6941 defm PMAXSD : SS48I_binop_rm<0x3D, "pmaxsd", X86smax, v4i32, VR128,
6942 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
6943 defm PMAXUD : SS48I_binop_rm<0x3F, "pmaxud", X86umax, v4i32, VR128,
6944 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
6945 defm PMAXUW : SS48I_binop_rm<0x3E, "pmaxuw", X86umax, v8i16, VR128,
6946 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
6947 defm PMULDQ : SS48I_binop_rm2<0x28, "pmuldq", X86pmuldq, v2i64, v4i32,
6948 VR128, memopv2i64, i128mem,
6949 SSE_INTMUL_ITINS_P, 1>;
6950 }
6952 let Predicates = [HasAVX, NoVLX] in {
6953 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
6954 memopv2i64, i128mem, 0, SSE_PMULLD_ITINS>,
6955 VEX_4V;
6956 defm VPCMPEQQ : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v2i64, VR128,
6957 memopv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6958 VEX_4V;
6959 }
6960 let Predicates = [HasAVX2] in {
6961 defm VPMULLDY : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
6962 loadv4i64, i256mem, 0, SSE_PMULLD_ITINS>,
6963 VEX_4V, VEX_L;
6964 defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
6965 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6966 VEX_4V, VEX_L;
6967 }
6969 let Constraints = "$src1 = $dst" in {
6970 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, VR128,
6971 memopv2i64, i128mem, 1, SSE_PMULLD_ITINS>;
6972 defm PCMPEQQ : SS48I_binop_rm<0x29, "pcmpeqq", X86pcmpeq, v2i64, VR128,
6973 memopv2i64, i128mem, 1, SSE_INTALUQ_ITINS_P>;
6974 }
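// Note: the legacy SSE4.1 forms above tie $src1 to $dst (destructive
// two-operand encoding), while the VEX-encoded variants earlier take a
// separate destination register (VEX_4V).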
6976 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
6977 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
6978 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
6979 X86MemOperand x86memop, bit Is2Addr = 1,
6980 OpndItins itins = DEFAULT_ITINS> {
6981 let isCommutable = 1 in
6982 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
6983 (ins RC:$src1, RC:$src2, u8imm:$src3),
6984 !if(Is2Addr,
6985 !strconcat(OpcodeStr,
6986 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6987 !strconcat(OpcodeStr,
6988 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6989 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))], itins.rr>,
6990 Sched<[itins.Sched]>;
6991 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
6992 (ins RC:$src1, x86memop:$src2, u8imm:$src3),
6993 !if(Is2Addr,
6994 !strconcat(OpcodeStr,
6995 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6996 !strconcat(OpcodeStr,
6997 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
7000 (bitconvert (memop_frag addr:$src2)), imm:$src3))], itins.rm>,
7001 Sched<[itins.Sched.Folded, ReadAfterLd]>;
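// SS41I_binop_rmi_int expands to a register form (rri) and a load-folding
// form (rmi); Is2Addr selects the two-operand SSE assembly string versus the
// three-operand AVX one.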
7004 let Predicates = [HasAVX] in {
7005 let isCommutable = 0 in {
7006 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
7007 VR128, loadv2i64, i128mem, 0,
7008 DEFAULT_ITINS_MPSADSCHED>, VEX_4V;
7011 let ExeDomain = SSEPackedSingle in {
7012 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
7013 VR128, loadv4f32, f128mem, 0,
7014 DEFAULT_ITINS_FBLENDSCHED>, VEX_4V;
7015 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
7016 int_x86_avx_blend_ps_256, VR256, loadv8f32,
7017 f256mem, 0, DEFAULT_ITINS_FBLENDSCHED>,
7018 VEX_4V, VEX_L;
7019 }
7020 let ExeDomain = SSEPackedDouble in {
7021 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
7022 VR128, loadv2f64, f128mem, 0,
7023 DEFAULT_ITINS_FBLENDSCHED>, VEX_4V;
7024 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
7025 int_x86_avx_blend_pd_256,VR256, loadv4f64,
7026 f256mem, 0, DEFAULT_ITINS_FBLENDSCHED>,
7027 VEX_4V, VEX_L;
7028 }
7029 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
7030 VR128, loadv2i64, i128mem, 0,
7031 DEFAULT_ITINS_BLENDSCHED>, VEX_4V;
7033 let ExeDomain = SSEPackedSingle in
7034 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
7035 VR128, loadv4f32, f128mem, 0,
7036 SSE_DPPS_ITINS>, VEX_4V;
7037 let ExeDomain = SSEPackedDouble in
7038 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
7039 VR128, loadv2f64, f128mem, 0,
7040 SSE_DPPS_ITINS>, VEX_4V;
7041 let ExeDomain = SSEPackedSingle in
7042 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
7043 VR256, loadv8f32, i256mem, 0,
7044 SSE_DPPS_ITINS>, VEX_4V, VEX_L;
7045 }
7047 let Predicates = [HasAVX2] in {
7048 let isCommutable = 0 in {
7049 defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw,
7050 VR256, loadv4i64, i256mem, 0,
7051 DEFAULT_ITINS_MPSADSCHED>, VEX_4V, VEX_L;
7053 defm VPBLENDWY : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_avx2_pblendw,
7054 VR256, loadv4i64, i256mem, 0,
7055 DEFAULT_ITINS_BLENDSCHED>, VEX_4V, VEX_L;
7056 }
7058 let Constraints = "$src1 = $dst" in {
7059 let isCommutable = 0 in {
7060 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
7061 VR128, memopv2i64, i128mem,
7062 1, SSE_MPSADBW_ITINS>;
7064 let ExeDomain = SSEPackedSingle in
7065 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
7066 VR128, memopv4f32, f128mem,
7067 1, SSE_INTALU_ITINS_FBLEND_P>;
7068 let ExeDomain = SSEPackedDouble in
7069 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
7070 VR128, memopv2f64, f128mem,
7071 1, SSE_INTALU_ITINS_FBLEND_P>;
7072 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
7073 VR128, memopv2i64, i128mem,
7074 1, SSE_INTALU_ITINS_BLEND_P>;
7075 let ExeDomain = SSEPackedSingle in
7076 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
7077 VR128, memopv4f32, f128mem, 1,
7078 SSE_DPPS_ITINS>;
7079 let ExeDomain = SSEPackedDouble in
7080 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
7081 VR128, memopv2f64, f128mem, 1,
7082 SSE_DPPD_ITINS>;
7083 }
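// For dpps/dppd the 8-bit immediate is a mask: the upper nibble selects which
// input elements take part in the dot product and the lower nibble selects
// which destination elements receive the result.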
7085 /// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
7086 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
7087 RegisterClass RC, X86MemOperand x86memop,
7088 PatFrag mem_frag, Intrinsic IntId,
7089 X86FoldableSchedWrite Sched> {
7090 def rr : Ii8<opc, MRMSrcReg, (outs RC:$dst),
7091 (ins RC:$src1, RC:$src2, RC:$src3),
7092 !strconcat(OpcodeStr,
7093 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
7094 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
7095 NoItinerary, SSEPackedInt>, TAPD, VEX_4V, VEX_I8IMM,
7096 Sched<[Sched]>;
7098 def rm : Ii8<opc, MRMSrcMem, (outs RC:$dst),
7099 (ins RC:$src1, x86memop:$src2, RC:$src3),
7100 !strconcat(OpcodeStr,
7101 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
7103 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
7105 NoItinerary, SSEPackedInt>, TAPD, VEX_4V, VEX_I8IMM,
7106 Sched<[Sched.Folded, ReadAfterLd]>;
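// The fourth (selector) register of the blendv forms is encoded in bits 7:4
// of an immediate byte (VEX_I8IMM), which is why these are Ii8 instructions
// with no explicit immediate operand.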
7109 let Predicates = [HasAVX] in {
7110 let ExeDomain = SSEPackedDouble in {
7111 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, f128mem,
7112 loadv2f64, int_x86_sse41_blendvpd,
7113 WriteFVarBlend>;
7114 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, f256mem,
7115 loadv4f64, int_x86_avx_blendv_pd_256,
7116 WriteFVarBlend>, VEX_L;
7117 } // ExeDomain = SSEPackedDouble
7118 let ExeDomain = SSEPackedSingle in {
7119 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, f128mem,
7120 loadv4f32, int_x86_sse41_blendvps,
7121 WriteFVarBlend>;
7122 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, f256mem,
7123 loadv8f32, int_x86_avx_blendv_ps_256,
7124 WriteFVarBlend>, VEX_L;
7125 } // ExeDomain = SSEPackedSingle
7126 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
7127 loadv2i64, int_x86_sse41_pblendvb,
7128 WriteVarBlend>;
7129 }
7131 let Predicates = [HasAVX2] in {
7132 defm VPBLENDVBY : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR256, i256mem,
7133 loadv4i64, int_x86_avx2_pblendvb,
7134 WriteVarBlend>, VEX_L;
7135 }
7137 let Predicates = [HasAVX] in {
7138 def : Pat<(v16i8 (vselect (v16i8 VR128:$mask), (v16i8 VR128:$src1),
7139 (v16i8 VR128:$src2))),
7140 (VPBLENDVBrr VR128:$src2, VR128:$src1, VR128:$mask)>;
7141 def : Pat<(v4i32 (vselect (v4i32 VR128:$mask), (v4i32 VR128:$src1),
7142 (v4i32 VR128:$src2))),
7143 (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
7144 def : Pat<(v4f32 (vselect (v4i32 VR128:$mask), (v4f32 VR128:$src1),
7145 (v4f32 VR128:$src2))),
7146 (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
7147 def : Pat<(v2i64 (vselect (v2i64 VR128:$mask), (v2i64 VR128:$src1),
7148 (v2i64 VR128:$src2))),
7149 (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
7150 def : Pat<(v2f64 (vselect (v2i64 VR128:$mask), (v2f64 VR128:$src1),
7151 (v2f64 VR128:$src2))),
7152 (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
7153 def : Pat<(v8i32 (vselect (v8i32 VR256:$mask), (v8i32 VR256:$src1),
7154 (v8i32 VR256:$src2))),
7155 (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
7156 def : Pat<(v8f32 (vselect (v8i32 VR256:$mask), (v8f32 VR256:$src1),
7157 (v8f32 VR256:$src2))),
7158 (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
7159 def : Pat<(v4i64 (vselect (v4i64 VR256:$mask), (v4i64 VR256:$src1),
7160 (v4i64 VR256:$src2))),
7161 (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
7162 def : Pat<(v4f64 (vselect (v4i64 VR256:$mask), (v4f64 VR256:$src1),
7163 (v4f64 VR256:$src2))),
7164 (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
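// The vselect operands are swapped in the output patterns above because
// [v]blendv takes its "true" (mask-set) elements from the second source.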
7166 def : Pat<(v8f32 (X86Blendi (v8f32 VR256:$src1), (v8f32 VR256:$src2),
7167 (i8 imm:$mask))),
7168 (VBLENDPSYrri VR256:$src1, VR256:$src2, imm:$mask)>;
7169 def : Pat<(v4f64 (X86Blendi (v4f64 VR256:$src1), (v4f64 VR256:$src2),
7170 (i8 imm:$mask))),
7171 (VBLENDPDYrri VR256:$src1, VR256:$src2, imm:$mask)>;
7173 def : Pat<(v8i16 (X86Blendi (v8i16 VR128:$src1), (v8i16 VR128:$src2),
7174 (i8 imm:$mask))),
7175 (VPBLENDWrri VR128:$src1, VR128:$src2, imm:$mask)>;
7176 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$src1), (v4f32 VR128:$src2),
7177 (i8 imm:$mask))),
7178 (VBLENDPSrri VR128:$src1, VR128:$src2, imm:$mask)>;
7179 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$src1), (v2f64 VR128:$src2),
7180 (i8 imm:$mask))),
7181 (VBLENDPDrri VR128:$src1, VR128:$src2, imm:$mask)>;
7182 }
7184 let Predicates = [HasAVX2] in {
7185 def : Pat<(v32i8 (vselect (v32i8 VR256:$mask), (v32i8 VR256:$src1),
7186 (v32i8 VR256:$src2))),
7187 (VPBLENDVBYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
7188 def : Pat<(v16i16 (X86Blendi (v16i16 VR256:$src1), (v16i16 VR256:$src2),
7189 (i8 imm:$mask))),
7190 (VPBLENDWYrri VR256:$src1, VR256:$src2, imm:$mask)>;
7191 }
7194 let Predicates = [UseAVX] in {
7195 let AddedComplexity = 15 in {
7196 // Move scalar to XMM zero-extended, zeroing a VR128 then do a
7197 // MOVS{S,D} to the lower bits.
7198 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
7199 (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
7200 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
7201 (VBLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
7202 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
7203 (VPBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
7204 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
7205 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
7207 // Move low f32 and clear high bits.
7208 def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
7209 (VBLENDPSYrri (v8f32 (AVX_SET0)), VR256:$src, (i8 1))>;
7210 def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
7211 (VBLENDPSYrri (v8i32 (AVX_SET0)), VR256:$src, (i8 1))>;
7212 }
7214 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
7215 (v4f32 (scalar_to_vector FR32:$src)), (iPTR 0)))),
7216 (SUBREG_TO_REG (i32 0),
7217 (v4f32 (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)),
7218 sub_xmm)>;
7219 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
7220 (v2f64 (scalar_to_vector FR64:$src)), (iPTR 0)))),
7221 (SUBREG_TO_REG (i64 0),
7222 (v2f64 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)),
7223 sub_xmm)>;
7225 // Move low f64 and clear high bits.
7226 def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
7227 (VBLENDPDYrri (v4f64 (AVX_SET0)), VR256:$src, (i8 1))>;
7229 def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
7230 (VBLENDPDYrri (v4i64 (AVX_SET0)), VR256:$src, (i8 1))>;
7231 }
7233 let Predicates = [UseSSE41] in {
7234 // With SSE41 we can use blends for these patterns.
7235 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
7236 (BLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
7237 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
7238 (PBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
7239 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
7240 (BLENDPDrri (v2f64 (V_SET0)), VR128:$src, (i8 1))>;
7241 }
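// The blend immediate keeps only element 0 from $src; all other lanes come
// from the zero vector, giving the zero-extending move semantics (pblendw
// needs imm 3 since its lanes are 16 bits wide).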
7244 /// SS41I_ternary_int - SSE 4.1 ternary operator
7245 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
7246 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
7247 X86MemOperand x86memop, Intrinsic IntId,
7248 OpndItins itins = DEFAULT_ITINS> {
7249 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
7250 (ins VR128:$src1, VR128:$src2),
7251 !strconcat(OpcodeStr,
7252 "\t{$src2, $dst|$dst, $src2}"),
7253 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))],
7254 itins.rr>, Sched<[itins.Sched]>;
7256 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
7257 (ins VR128:$src1, x86memop:$src2),
7258 !strconcat(OpcodeStr,
7259 "\t{$src2, $dst|$dst, $src2}"),
7262 (bitconvert (mem_frag addr:$src2)), XMM0))],
7263 itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
7267 let ExeDomain = SSEPackedDouble in
7268 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", memopv2f64, f128mem,
7269 int_x86_sse41_blendvpd,
7270 DEFAULT_ITINS_FBLENDSCHED>;
7271 let ExeDomain = SSEPackedSingle in
7272 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", memopv4f32, f128mem,
7273 int_x86_sse41_blendvps,
7274 DEFAULT_ITINS_FBLENDSCHED>;
7275 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", memopv2i64, i128mem,
7276 int_x86_sse41_pblendvb,
7277 DEFAULT_ITINS_VARBLENDSCHED>;
7279 // Aliases with the implicit xmm0 argument
7280 def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7281 (BLENDVPDrr0 VR128:$dst, VR128:$src2)>;
7282 def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7283 (BLENDVPDrm0 VR128:$dst, f128mem:$src2)>;
7284 def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7285 (BLENDVPSrr0 VR128:$dst, VR128:$src2)>;
7286 def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7287 (BLENDVPSrm0 VR128:$dst, f128mem:$src2)>;
7288 def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7289 (PBLENDVBrr0 VR128:$dst, VR128:$src2)>;
7290 def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7291 (PBLENDVBrm0 VR128:$dst, i128mem:$src2)>;
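// The SSE blendv forms read xmm0 implicitly, so these aliases let the
// assembler accept (and the printer emit) the explicit three-operand spelling.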
7293 let Predicates = [UseSSE41] in {
7294 def : Pat<(v16i8 (vselect (v16i8 XMM0), (v16i8 VR128:$src1),
7295 (v16i8 VR128:$src2))),
7296 (PBLENDVBrr0 VR128:$src2, VR128:$src1)>;
7297 def : Pat<(v4i32 (vselect (v4i32 XMM0), (v4i32 VR128:$src1),
7298 (v4i32 VR128:$src2))),
7299 (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
7300 def : Pat<(v4f32 (vselect (v4i32 XMM0), (v4f32 VR128:$src1),
7301 (v4f32 VR128:$src2))),
7302 (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
7303 def : Pat<(v2i64 (vselect (v2i64 XMM0), (v2i64 VR128:$src1),
7304 (v2i64 VR128:$src2))),
7305 (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
7306 def : Pat<(v2f64 (vselect (v2i64 XMM0), (v2f64 VR128:$src1),
7307 (v2f64 VR128:$src2))),
7308 (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
7310 def : Pat<(v8i16 (X86Blendi (v8i16 VR128:$src1), (v8i16 VR128:$src2),
7311 (i8 imm:$mask))),
7312 (PBLENDWrri VR128:$src1, VR128:$src2, imm:$mask)>;
7313 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$src1), (v4f32 VR128:$src2),
7314 (i8 imm:$mask))),
7315 (BLENDPSrri VR128:$src1, VR128:$src2, imm:$mask)>;
7316 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$src1), (v2f64 VR128:$src2),
7317 (i8 imm:$mask))),
7318 (BLENDPDrri VR128:$src1, VR128:$src2, imm:$mask)>;
7319 }
7322 let SchedRW = [WriteLoad] in {
7323 let Predicates = [HasAVX] in
7324 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
7325 "vmovntdqa\t{$src, $dst|$dst, $src}",
7326 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
7327 VEX;
7328 let Predicates = [HasAVX2] in
7329 def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
7330 "vmovntdqa\t{$src, $dst|$dst, $src}",
7331 [(set VR256:$dst, (int_x86_avx2_movntdqa addr:$src))]>,
7332 VEX, VEX_L;
7333 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
7334 "movntdqa\t{$src, $dst|$dst, $src}",
7335 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>;
7336 } // SchedRW
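// movntdqa is a non-temporal (streaming) load hint; from write-combining
// memory it can fetch data without polluting the caches.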
7338 //===----------------------------------------------------------------------===//
7339 // SSE4.2 - Compare Instructions
7340 //===----------------------------------------------------------------------===//
7342 /// SS42I_binop_rm - Simple SSE 4.2 binary operator
7343 multiclass SS42I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
7344 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
7345 X86MemOperand x86memop, bit Is2Addr = 1> {
7346 def rr : SS428I<opc, MRMSrcReg, (outs RC:$dst),
7347 (ins RC:$src1, RC:$src2),
7348 !if(Is2Addr,
7349 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7350 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7351 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>;
7352 def rm : SS428I<opc, MRMSrcMem, (outs RC:$dst),
7353 (ins RC:$src1, x86memop:$src2),
7354 !if(Is2Addr,
7355 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7356 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7357 [(set RC:$dst,
7358 (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>;
7359 }
7361 let Predicates = [HasAVX] in
7362 defm VPCMPGTQ : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v2i64, VR128,
7363 loadv2i64, i128mem, 0>, VEX_4V;
7365 let Predicates = [HasAVX2] in
7366 defm VPCMPGTQY : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v4i64, VR256,
7367 loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
7369 let Constraints = "$src1 = $dst" in
7370 defm PCMPGTQ : SS42I_binop_rm<0x37, "pcmpgtq", X86pcmpgt, v2i64, VR128,
7371 memopv2i64, i128mem>;
7373 //===----------------------------------------------------------------------===//
7374 // SSE4.2 - String/text Processing Instructions
7375 //===----------------------------------------------------------------------===//
7377 // Packed Compare Implicit Length Strings, Return Mask
7378 multiclass pseudo_pcmpistrm<string asm, PatFrag ld_frag> {
7379 def REG : PseudoI<(outs VR128:$dst),
7380 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
7381 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
7382 imm:$src3))]>;
7383 def MEM : PseudoI<(outs VR128:$dst),
7384 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
7385 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1,
7386 (bc_v16i8 (ld_frag addr:$src2)), imm:$src3))]>;
7387 }
7389 let Defs = [EFLAGS], usesCustomInserter = 1 in {
7390 defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128", loadv2i64>,
7391 Requires<[HasAVX]>;
7392 defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128", memopv2i64>,
7393 Requires<[UseSSE42]>;
7394 }
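// The pseudo instructions above are expanded by the custom inserter, which
// issues the real pcmpistrm and copies the implicit XMM0 result into $dst.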
7396 multiclass pcmpistrm_SS42AI<string asm> {
7397 def rr : SS42AI<0x62, MRMSrcReg, (outs),
7398 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
7399 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
7400 []>, Sched<[WritePCmpIStrM]>;
7402 def rm :SS42AI<0x62, MRMSrcMem, (outs),
7403 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
7404 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
7405 []>, Sched<[WritePCmpIStrMLd, ReadAfterLd]>;
7408 let Defs = [XMM0, EFLAGS], hasSideEffects = 0 in {
7409 let Predicates = [HasAVX] in
7410 defm VPCMPISTRM128 : pcmpistrm_SS42AI<"vpcmpistrm">, VEX;
7411 defm PCMPISTRM128 : pcmpistrm_SS42AI<"pcmpistrm"> ;
7414 // Packed Compare Explicit Length Strings, Return Mask
7415 multiclass pseudo_pcmpestrm<string asm, PatFrag ld_frag> {
7416 def REG : PseudoI<(outs VR128:$dst),
7417 (ins VR128:$src1, VR128:$src3, u8imm:$src5),
7418 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
7419 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
7420 def MEM : PseudoI<(outs VR128:$dst),
7421 (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
7422 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128 VR128:$src1, EAX,
7423 (bc_v16i8 (ld_frag addr:$src3)), EDX, imm:$src5))]>;
7426 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
7427 defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128", loadv2i64>,
7428 Requires<[HasAVX]>;
7429 defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128", memopv2i64>,
7430 Requires<[UseSSE42]>;
7431 }
7433 multiclass SS42AI_pcmpestrm<string asm> {
7434 def rr : SS42AI<0x60, MRMSrcReg, (outs),
7435 (ins VR128:$src1, VR128:$src3, u8imm:$src5),
7436 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
7437 []>, Sched<[WritePCmpEStrM]>;
7439 def rm : SS42AI<0x60, MRMSrcMem, (outs),
7440 (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
7441 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
7442 []>, Sched<[WritePCmpEStrMLd, ReadAfterLd]>;
7445 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
7446 let Predicates = [HasAVX] in
7447 defm VPCMPESTRM128 : SS42AI_pcmpestrm<"vpcmpestrm">, VEX;
7448 defm PCMPESTRM128 : SS42AI_pcmpestrm<"pcmpestrm">;
7451 // Packed Compare Implicit Length Strings, Return Index
7452 multiclass pseudo_pcmpistri<string asm, PatFrag ld_frag> {
7453 def REG : PseudoI<(outs GR32:$dst),
7454 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
7455 [(set GR32:$dst, EFLAGS,
7456 (X86pcmpistri VR128:$src1, VR128:$src2, imm:$src3))]>;
7457 def MEM : PseudoI<(outs GR32:$dst),
7458 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
7459 [(set GR32:$dst, EFLAGS, (X86pcmpistri VR128:$src1,
7460 (bc_v16i8 (ld_frag addr:$src2)), imm:$src3))]>;
7463 let Defs = [EFLAGS], usesCustomInserter = 1 in {
7464 defm VPCMPISTRI : pseudo_pcmpistri<"#VPCMPISTRI", loadv2i64>,
7465 Requires<[HasAVX]>;
7466 defm PCMPISTRI : pseudo_pcmpistri<"#PCMPISTRI", memopv2i64>,
7467 Requires<[UseSSE42]>;
7468 }
7470 multiclass SS42AI_pcmpistri<string asm> {
7471 def rr : SS42AI<0x63, MRMSrcReg, (outs),
7472 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
7473 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
7474 []>, Sched<[WritePCmpIStrI]>;
7476 def rm : SS42AI<0x63, MRMSrcMem, (outs),
7477 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
7478 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
7479 []>, Sched<[WritePCmpIStrILd, ReadAfterLd]>;
7482 let Defs = [ECX, EFLAGS], hasSideEffects = 0 in {
7483 let Predicates = [HasAVX] in
7484 defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX;
7485 defm PCMPISTRI : SS42AI_pcmpistri<"pcmpistri">;
7488 // Packed Compare Explicit Length Strings, Return Index
7489 multiclass pseudo_pcmpestri<string asm, PatFrag ld_frag> {
7490 def REG : PseudoI<(outs GR32:$dst),
7491 (ins VR128:$src1, VR128:$src3, u8imm:$src5),
7492 [(set GR32:$dst, EFLAGS,
7493 (X86pcmpestri VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
7494 def MEM : PseudoI<(outs GR32:$dst),
7495 (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
7496 [(set GR32:$dst, EFLAGS,
7497 (X86pcmpestri VR128:$src1, EAX, (bc_v16i8 (ld_frag addr:$src3)), EDX,
7498 imm:$src5))]>;
7499 }
7501 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
7502 defm VPCMPESTRI : pseudo_pcmpestri<"#VPCMPESTRI", loadv2i64>,
7503 Requires<[HasAVX]>;
7504 defm PCMPESTRI : pseudo_pcmpestri<"#PCMPESTRI", memopv2i64>,
7505 Requires<[UseSSE42]>;
7506 }
7508 multiclass SS42AI_pcmpestri<string asm> {
7509 def rr : SS42AI<0x61, MRMSrcReg, (outs),
7510 (ins VR128:$src1, VR128:$src3, u8imm:$src5),
7511 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
7512 []>, Sched<[WritePCmpEStrI]>;
7514 def rm : SS42AI<0x61, MRMSrcMem, (outs),
7515 (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
7516 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
7517 []>, Sched<[WritePCmpEStrILd, ReadAfterLd]>;
7520 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
7521 let Predicates = [HasAVX] in
7522 defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX;
7523 defm PCMPESTRI : SS42AI_pcmpestri<"pcmpestri">;
7526 //===----------------------------------------------------------------------===//
7527 // SSE4.2 - CRC Instructions
7528 //===----------------------------------------------------------------------===//
7530 // No CRC instructions have AVX equivalents
7532 // crc intrinsic instruction
7533 // This set of instructions is only rm; the only difference is the size
7534 // of r and m.
7535 class SS42I_crc32r<bits<8> opc, string asm, RegisterClass RCOut,
7536 RegisterClass RCIn, SDPatternOperator Int> :
7537 SS42FI<opc, MRMSrcReg, (outs RCOut:$dst), (ins RCOut:$src1, RCIn:$src2),
7538 !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
7539 [(set RCOut:$dst, (Int RCOut:$src1, RCIn:$src2))], IIC_CRC32_REG>,
7540 Sched<[WriteFAdd]>;
7542 class SS42I_crc32m<bits<8> opc, string asm, RegisterClass RCOut,
7543 X86MemOperand x86memop, SDPatternOperator Int> :
7544 SS42FI<opc, MRMSrcMem, (outs RCOut:$dst), (ins RCOut:$src1, x86memop:$src2),
7545 !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
7546 [(set RCOut:$dst, (Int RCOut:$src1, (load addr:$src2)))],
7547 IIC_CRC32_MEM>, Sched<[WriteFAddLd, ReadAfterLd]>;
7549 let Constraints = "$src1 = $dst" in {
7550 def CRC32r32m8 : SS42I_crc32m<0xF0, "crc32{b}", GR32, i8mem,
7551 int_x86_sse42_crc32_32_8>;
7552 def CRC32r32r8 : SS42I_crc32r<0xF0, "crc32{b}", GR32, GR8,
7553 int_x86_sse42_crc32_32_8>;
7554 def CRC32r32m16 : SS42I_crc32m<0xF1, "crc32{w}", GR32, i16mem,
7555 int_x86_sse42_crc32_32_16>, OpSize16;
7556 def CRC32r32r16 : SS42I_crc32r<0xF1, "crc32{w}", GR32, GR16,
7557 int_x86_sse42_crc32_32_16>, OpSize16;
7558 def CRC32r32m32 : SS42I_crc32m<0xF1, "crc32{l}", GR32, i32mem,
7559 int_x86_sse42_crc32_32_32>, OpSize32;
7560 def CRC32r32r32 : SS42I_crc32r<0xF1, "crc32{l}", GR32, GR32,
7561 int_x86_sse42_crc32_32_32>, OpSize32;
7562 def CRC32r64m64 : SS42I_crc32m<0xF1, "crc32{q}", GR64, i64mem,
7563 int_x86_sse42_crc32_64_64>, REX_W;
7564 def CRC32r64r64 : SS42I_crc32r<0xF1, "crc32{q}", GR64, GR64,
7565 int_x86_sse42_crc32_64_64>, REX_W;
7566 let hasSideEffects = 0 in {
7567 let mayLoad = 1 in
7568 def CRC32r64m8 : SS42I_crc32m<0xF0, "crc32{b}", GR64, i8mem,
7569 null_frag>, REX_W;
7570 def CRC32r64r8 : SS42I_crc32r<0xF0, "crc32{b}", GR64, GR8,
7571 null_frag>, REX_W;
7572 }
7573 }
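// crc32 accumulates a CRC-32C value (Castagnoli polynomial 0x11EDC6F41) in
// the destination register; these forms differ only in operand width.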
7575 //===----------------------------------------------------------------------===//
7576 // SHA-NI Instructions
7577 //===----------------------------------------------------------------------===//
7579 multiclass SHAI_binop<bits<8> Opc, string OpcodeStr, Intrinsic IntId,
7580 bit UsesXMM0 = 0> {
7581 def rr : I<Opc, MRMSrcReg, (outs VR128:$dst),
7582 (ins VR128:$src1, VR128:$src2),
7583 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7584 [!if(UsesXMM0,
7585 (set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0)),
7586 (set VR128:$dst, (IntId VR128:$src1, VR128:$src2)))]>, T8;
7588 def rm : I<Opc, MRMSrcMem, (outs VR128:$dst),
7589 (ins VR128:$src1, i128mem:$src2),
7590 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7591 [!if(UsesXMM0,
7592 (set VR128:$dst, (IntId VR128:$src1,
7593 (bc_v4i32 (memopv2i64 addr:$src2)), XMM0)),
7594 (set VR128:$dst, (IntId VR128:$src1,
7595 (bc_v4i32 (memopv2i64 addr:$src2)))))]>, T8;
7596 }
7598 let Constraints = "$src1 = $dst", Predicates = [HasSHA] in {
7599 def SHA1RNDS4rri : Ii8<0xCC, MRMSrcReg, (outs VR128:$dst),
7600 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
7601 "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
7603 (int_x86_sha1rnds4 VR128:$src1, VR128:$src2,
7604 (i8 imm:$src3)))]>, TA;
7605 def SHA1RNDS4rmi : Ii8<0xCC, MRMSrcMem, (outs VR128:$dst),
7606 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
7607 "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
7609 (int_x86_sha1rnds4 VR128:$src1,
7610 (bc_v4i32 (memopv2i64 addr:$src2)),
7611 (i8 imm:$src3)))]>, TA;
7613 defm SHA1NEXTE : SHAI_binop<0xC8, "sha1nexte", int_x86_sha1nexte>;
7614 defm SHA1MSG1 : SHAI_binop<0xC9, "sha1msg1", int_x86_sha1msg1>;
7615 defm SHA1MSG2 : SHAI_binop<0xCA, "sha1msg2", int_x86_sha1msg2>;
7617 let Uses=[XMM0] in
7618 defm SHA256RNDS2 : SHAI_binop<0xCB, "sha256rnds2", int_x86_sha256rnds2, 1>;
7620 defm SHA256MSG1 : SHAI_binop<0xCC, "sha256msg1", int_x86_sha256msg1>;
7621 defm SHA256MSG2 : SHAI_binop<0xCD, "sha256msg2", int_x86_sha256msg2>;
7622 }
7624 // Aliases with explicit %xmm0
7625 def : InstAlias<"sha256rnds2\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7626 (SHA256RNDS2rr VR128:$dst, VR128:$src2)>;
7627 def : InstAlias<"sha256rnds2\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7628 (SHA256RNDS2rm VR128:$dst, i128mem:$src2)>;
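// sha256rnds2 takes xmm0 as an implicit third source, hence the UsesXMM0
// form of SHAI_binop and the explicit-%xmm0 aliases above.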
7630 //===----------------------------------------------------------------------===//
7631 // AES-NI Instructions
7632 //===----------------------------------------------------------------------===//
7634 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
7635 PatFrag ld_frag, bit Is2Addr = 1> {
7636 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
7637 (ins VR128:$src1, VR128:$src2),
7638 !if(Is2Addr,
7639 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7640 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7641 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
7642 Sched<[WriteAESDecEnc]>;
7643 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
7644 (ins VR128:$src1, i128mem:$src2),
7645 !if(Is2Addr,
7646 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7647 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7648 [(set VR128:$dst,
7649 (IntId128 VR128:$src1, (ld_frag addr:$src2)))]>,
7650 Sched<[WriteAESDecEncLd, ReadAfterLd]>;
7651 }
7653 // Perform One Round of an AES Encryption/Decryption Flow
7654 let Predicates = [HasAVX, HasAES] in {
7655 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
7656 int_x86_aesni_aesenc, loadv2i64, 0>, VEX_4V;
7657 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
7658 int_x86_aesni_aesenclast, loadv2i64, 0>, VEX_4V;
7659 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
7660 int_x86_aesni_aesdec, loadv2i64, 0>, VEX_4V;
7661 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
7662 int_x86_aesni_aesdeclast, loadv2i64, 0>, VEX_4V;
7665 let Constraints = "$src1 = $dst" in {
7666 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
7667 int_x86_aesni_aesenc, memopv2i64>;
7668 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
7669 int_x86_aesni_aesenclast, memopv2i64>;
7670 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
7671 int_x86_aesni_aesdec, memopv2i64>;
7672 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
7673 int_x86_aesni_aesdeclast, memopv2i64>;
7676 // Perform the AES InvMixColumn Transformation
7677 let Predicates = [HasAVX, HasAES] in {
7678 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
7679 (ins VR128:$src1),
7680 "vaesimc\t{$src1, $dst|$dst, $src1}",
7681 [(set VR128:$dst,
7682 (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>,
7683 VEX;
7684 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
7685 (ins i128mem:$src1),
7686 "vaesimc\t{$src1, $dst|$dst, $src1}",
7687 [(set VR128:$dst, (int_x86_aesni_aesimc (loadv2i64 addr:$src1)))]>,
7688 Sched<[WriteAESIMCLd]>, VEX;
7689 }
7690 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
7691 (ins VR128:$src1),
7692 "aesimc\t{$src1, $dst|$dst, $src1}",
7693 [(set VR128:$dst,
7694 (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>;
7695 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
7696 (ins i128mem:$src1),
7697 "aesimc\t{$src1, $dst|$dst, $src1}",
7698 [(set VR128:$dst, (int_x86_aesni_aesimc (memopv2i64 addr:$src1)))]>,
7699 Sched<[WriteAESIMCLd]>;
7701 // AES Round Key Generation Assist
7702 let Predicates = [HasAVX, HasAES] in {
7703 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
7704 (ins VR128:$src1, u8imm:$src2),
7705 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7707 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
7708 Sched<[WriteAESKeyGen]>, VEX;
7709 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
7710 (ins i128mem:$src1, u8imm:$src2),
7711 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7713 (int_x86_aesni_aeskeygenassist (loadv2i64 addr:$src1), imm:$src2))]>,
7714 Sched<[WriteAESKeyGenLd]>, VEX;
7716 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
7717 (ins VR128:$src1, u8imm:$src2),
7718 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7720 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
7721 Sched<[WriteAESKeyGen]>;
7722 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
7723 (ins i128mem:$src1, u8imm:$src2),
7724 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7726 (int_x86_aesni_aeskeygenassist (memopv2i64 addr:$src1), imm:$src2))]>,
7727 Sched<[WriteAESKeyGenLd]>;
7729 //===----------------------------------------------------------------------===//
7730 // PCLMUL Instructions
7731 //===----------------------------------------------------------------------===//
7733 // AVX carry-less Multiplication instructions
7734 let isCommutable = 1 in
7735 def VPCLMULQDQrr : AVXPCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
7736 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
7737 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
7739 (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>,
7740 Sched<[WriteCLMul]>;
7742 def VPCLMULQDQrm : AVXPCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
7743 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
7744 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
7745 [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
7746 (loadv2i64 addr:$src2), imm:$src3))]>,
7747 Sched<[WriteCLMulLd, ReadAfterLd]>;
7749 // Carry-less Multiplication instructions
7750 let Constraints = "$src1 = $dst" in {
7751 let isCommutable = 1 in
7752 def PCLMULQDQrr : PCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
7753 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
7754 "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
7756 (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))],
7757 IIC_SSE_PCLMULQDQ_RR>, Sched<[WriteCLMul]>;
7759 def PCLMULQDQrm : PCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
7760 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
7761 "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
7762 [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
7763 (memopv2i64 addr:$src2), imm:$src3))],
7764 IIC_SSE_PCLMULQDQ_RM>,
7765 Sched<[WriteCLMulLd, ReadAfterLd]>;
7766 } // Constraints = "$src1 = $dst"
7769 multiclass pclmul_alias<string asm, int immop> {
7770 def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
7771 (PCLMULQDQrr VR128:$dst, VR128:$src, immop), 0>;
7773 def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
7774 (PCLMULQDQrm VR128:$dst, i128mem:$src, immop), 0>;
7776 def : InstAlias<!strconcat("vpclmul", asm,
7777 "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
7778 (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop),
7779 0>;
7781 def : InstAlias<!strconcat("vpclmul", asm,
7782 "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
7783 (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop),
7784 0>;
7785 }
7786 defm : pclmul_alias<"hqhq", 0x11>;
7787 defm : pclmul_alias<"hqlq", 0x01>;
7788 defm : pclmul_alias<"lqhq", 0x10>;
7789 defm : pclmul_alias<"lqlq", 0x00>;
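// In the pclmulqdq immediate, bit 0 selects the low/high quadword of the
// first source and bit 4 selects the quadword of the second source; the
// aliases spell out the four combinations.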
7791 //===----------------------------------------------------------------------===//
7792 // SSE4A Instructions
7793 //===----------------------------------------------------------------------===//
7795 let Predicates = [HasSSE4A] in {
7797 let Constraints = "$src = $dst" in {
7798 def EXTRQI : Ii8<0x78, MRMXr, (outs VR128:$dst),
7799 (ins VR128:$src, u8imm:$len, u8imm:$idx),
7800 "extrq\t{$idx, $len, $src|$src, $len, $idx}",
7801 [(set VR128:$dst, (int_x86_sse4a_extrqi VR128:$src, imm:$len,
7802 imm:$idx))]>, PD;
7803 def EXTRQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
7804 (ins VR128:$src, VR128:$mask),
7805 "extrq\t{$mask, $src|$src, $mask}",
7806 [(set VR128:$dst, (int_x86_sse4a_extrq VR128:$src,
7807 VR128:$mask))]>, PD;
7809 def INSERTQI : Ii8<0x78, MRMSrcReg, (outs VR128:$dst),
7810 (ins VR128:$src, VR128:$src2, u8imm:$len, u8imm:$idx),
7811 "insertq\t{$idx, $len, $src2, $src|$src, $src2, $len, $idx}",
7812 [(set VR128:$dst, (int_x86_sse4a_insertqi VR128:$src,
7813 VR128:$src2, imm:$len, imm:$idx))]>, XD;
7814 def INSERTQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
7815 (ins VR128:$src, VR128:$mask),
7816 "insertq\t{$mask, $src|$src, $mask}",
7817 [(set VR128:$dst, (int_x86_sse4a_insertq VR128:$src,
7818 VR128:$mask))]>, XD;
7821 def MOVNTSS : I<0x2B, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
7822 "movntss\t{$src, $dst|$dst, $src}",
7823 [(int_x86_sse4a_movnt_ss addr:$dst, VR128:$src)]>, XS;
7825 def MOVNTSD : I<0x2B, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
7826 "movntsd\t{$src, $dst|$dst, $src}",
7827 [(int_x86_sse4a_movnt_sd addr:$dst, VR128:$src)]>, XD;
7830 //===----------------------------------------------------------------------===//
7831 // AVX Instructions
7832 //===----------------------------------------------------------------------===//
7834 //===----------------------------------------------------------------------===//
7835 // VBROADCAST - Load from memory and broadcast to all elements of the
7836 // destination operand
7838 class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
7839 X86MemOperand x86memop, Intrinsic Int, SchedWrite Sched> :
7840 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
7841 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
7842 [(set RC:$dst, (Int addr:$src))]>, Sched<[Sched]>, VEX;
7844 class avx_broadcast_no_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
7845 X86MemOperand x86memop, ValueType VT,
7846 PatFrag ld_frag, SchedWrite Sched> :
7847 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
7848 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
7849 [(set RC:$dst, (VT (X86VBroadcast (ld_frag addr:$src))))]>,
7850 Sched<[Sched]>, VEX {
7851 let mayLoad = 1;
7852 }
7854 // AVX2 adds register forms
7855 class avx2_broadcast_reg<bits<8> opc, string OpcodeStr, RegisterClass RC,
7856 Intrinsic Int, SchedWrite Sched> :
7857 AVX28I<opc, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
7858 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
7859 [(set RC:$dst, (Int VR128:$src))]>, Sched<[Sched]>, VEX;
7861 let ExeDomain = SSEPackedSingle in {
7862 def VBROADCASTSSrm : avx_broadcast_no_int<0x18, "vbroadcastss", VR128,
7863 f32mem, v4f32, loadf32, WriteLoad>;
7864 def VBROADCASTSSYrm : avx_broadcast_no_int<0x18, "vbroadcastss", VR256,
7865 f32mem, v8f32, loadf32,
7866 WriteFShuffleLd>, VEX_L;
7868 let ExeDomain = SSEPackedDouble in
7869 def VBROADCASTSDYrm : avx_broadcast_no_int<0x19, "vbroadcastsd", VR256, f64mem,
7870 v4f64, loadf64, WriteFShuffleLd>, VEX_L;
7871 def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
7872 int_x86_avx_vbroadcastf128_pd_256,
7873 WriteFShuffleLd>, VEX_L;
7875 let ExeDomain = SSEPackedSingle in {
7876 def VBROADCASTSSrr : avx2_broadcast_reg<0x18, "vbroadcastss", VR128,
7877 int_x86_avx2_vbroadcast_ss_ps,
7878 WriteFShuffle>;
7879 def VBROADCASTSSYrr : avx2_broadcast_reg<0x18, "vbroadcastss", VR256,
7880 int_x86_avx2_vbroadcast_ss_ps_256,
7881 WriteFShuffle256>, VEX_L;
7883 let ExeDomain = SSEPackedDouble in
7884 def VBROADCASTSDYrr : avx2_broadcast_reg<0x19, "vbroadcastsd", VR256,
7885 int_x86_avx2_vbroadcast_sd_pd_256,
7886 WriteFShuffle256>, VEX_L;
7888 let Predicates = [HasAVX2] in
7889 def VBROADCASTI128 : avx_broadcast<0x5A, "vbroadcasti128", VR256, i128mem,
7890 int_x86_avx2_vbroadcasti128, WriteLoad>,
7891 VEX_L;
7893 let Predicates = [HasAVX] in
7894 def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
7895 (VBROADCASTF128 addr:$src)>;
7898 //===----------------------------------------------------------------------===//
7899 // VINSERTF128 - Insert packed floating-point values
7901 let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
7902 def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
7903 (ins VR256:$src1, VR128:$src2, u8imm:$src3),
7904 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
7905 []>, Sched<[WriteFShuffle]>, VEX_4V, VEX_L;
7907 def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
7908 (ins VR256:$src1, f128mem:$src2, u8imm:$src3),
7909 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
7910 []>, Sched<[WriteFShuffleLd, ReadAfterLd]>, VEX_4V, VEX_L;
7913 let Predicates = [HasAVX] in {
7914 def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
7915 (iPTR imm)),
7916 (VINSERTF128rr VR256:$src1, VR128:$src2,
7917 (INSERT_get_vinsert128_imm VR256:$ins))>;
7918 def : Pat<(vinsert128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
7919 (iPTR imm)),
7920 (VINSERTF128rr VR256:$src1, VR128:$src2,
7921 (INSERT_get_vinsert128_imm VR256:$ins))>;
7923 def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1), (loadv4f32 addr:$src2),
7924 (iPTR imm)),
7925 (VINSERTF128rm VR256:$src1, addr:$src2,
7926 (INSERT_get_vinsert128_imm VR256:$ins))>;
7927 def : Pat<(vinsert128_insert:$ins (v4f64 VR256:$src1), (loadv2f64 addr:$src2),
7928 (iPTR imm)),
7929 (VINSERTF128rm VR256:$src1, addr:$src2,
7930 (INSERT_get_vinsert128_imm VR256:$ins))>;
7931 }
7933 let Predicates = [HasAVX1Only] in {
7934 def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
7935 (iPTR imm)),
7936 (VINSERTF128rr VR256:$src1, VR128:$src2,
7937 (INSERT_get_vinsert128_imm VR256:$ins))>;
7938 def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
7939 (iPTR imm)),
7940 (VINSERTF128rr VR256:$src1, VR128:$src2,
7941 (INSERT_get_vinsert128_imm VR256:$ins))>;
7942 def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
7943 (iPTR imm)),
7944 (VINSERTF128rr VR256:$src1, VR128:$src2,
7945 (INSERT_get_vinsert128_imm VR256:$ins))>;
7946 def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
7947 (iPTR imm)),
7948 (VINSERTF128rr VR256:$src1, VR128:$src2,
7949 (INSERT_get_vinsert128_imm VR256:$ins))>;
7951 def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
7952 (iPTR imm)),
7953 (VINSERTF128rm VR256:$src1, addr:$src2,
7954 (INSERT_get_vinsert128_imm VR256:$ins))>;
7955 def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1),
7956 (bc_v4i32 (loadv2i64 addr:$src2)),
7957 (iPTR imm)),
7958 (VINSERTF128rm VR256:$src1, addr:$src2,
7959 (INSERT_get_vinsert128_imm VR256:$ins))>;
7960 def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1),
7961 (bc_v16i8 (loadv2i64 addr:$src2)),
7962 (iPTR imm)),
7963 (VINSERTF128rm VR256:$src1, addr:$src2,
7964 (INSERT_get_vinsert128_imm VR256:$ins))>;
7965 def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1),
7966 (bc_v8i16 (loadv2i64 addr:$src2)),
7967 (iPTR imm)),
7968 (VINSERTF128rm VR256:$src1, addr:$src2,
7969 (INSERT_get_vinsert128_imm VR256:$ins))>;
7970 }
7972 //===----------------------------------------------------------------------===//
7973 // VEXTRACTF128 - Extract packed floating-point values
7975 let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
7976 def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
7977 (ins VR256:$src1, u8imm:$src2),
7978 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7979 []>, Sched<[WriteFShuffle]>, VEX, VEX_L;
7981 def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
7982 (ins f128mem:$dst, VR256:$src1, u8imm:$src2),
7983 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7984 []>, Sched<[WriteStore]>, VEX, VEX_L;
7988 let Predicates = [HasAVX] in {
7989 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
7990 (v4f32 (VEXTRACTF128rr
7991 (v8f32 VR256:$src1),
7992 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
7993 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
7994 (v2f64 (VEXTRACTF128rr
7995 (v4f64 VR256:$src1),
7996 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
7998 def : Pat<(store (v4f32 (vextract128_extract:$ext (v8f32 VR256:$src1),
7999 (iPTR imm))), addr:$dst),
8000 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8001 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8002 def : Pat<(store (v2f64 (vextract128_extract:$ext (v4f64 VR256:$src1),
8003 (iPTR imm))), addr:$dst),
8004 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8005 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8008 let Predicates = [HasAVX1Only] in {
8009 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8010 (v2i64 (VEXTRACTF128rr
8011 (v4i64 VR256:$src1),
8012 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8013 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8014 (v4i32 (VEXTRACTF128rr
8015 (v8i32 VR256:$src1),
8016 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8017 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8018 (v8i16 (VEXTRACTF128rr
8019 (v16i16 VR256:$src1),
8020 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8021 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8022 (v16i8 (VEXTRACTF128rr
8023 (v32i8 VR256:$src1),
8024 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8026 def : Pat<(alignedstore (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
8027 (iPTR imm))), addr:$dst),
8028 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8029 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8030 def : Pat<(alignedstore (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
8031 (iPTR imm))), addr:$dst),
8032 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8033 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8034 def : Pat<(alignedstore (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
8035 (iPTR imm))), addr:$dst),
8036 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8037 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8038 def : Pat<(alignedstore (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
8039 (iPTR imm))), addr:$dst),
8040 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8041 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8042 }
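// With AVX1 only there is no vextracti128, so 128-bit integer extracts and
// their stores are matched to vextractf128 instead.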
8044 //===----------------------------------------------------------------------===//
8045 // VMASKMOV - Conditional SIMD Packed Loads and Stores
8047 multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
8048 Intrinsic IntLd, Intrinsic IntLd256,
8049 Intrinsic IntSt, Intrinsic IntSt256> {
8050 def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
8051 (ins VR128:$src1, f128mem:$src2),
8052 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8053 [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
8054 VEX_4V;
8055 def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
8056 (ins VR256:$src1, f256mem:$src2),
8057 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8058 [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
8059 VEX_4V, VEX_L;
8060 def mr : AVX8I<opc_mr, MRMDestMem, (outs),
8061 (ins f128mem:$dst, VR128:$src1, VR128:$src2),
8062 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8063 [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
8064 def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
8065 (ins f256mem:$dst, VR256:$src1, VR256:$src2),
8066 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8067 [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
8070 let ExeDomain = SSEPackedSingle in
8071 defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
8072 int_x86_avx_maskload_ps,
8073 int_x86_avx_maskload_ps_256,
8074 int_x86_avx_maskstore_ps,
8075 int_x86_avx_maskstore_ps_256>;
8076 let ExeDomain = SSEPackedDouble in
8077 defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
8078 int_x86_avx_maskload_pd,
8079 int_x86_avx_maskload_pd_256,
8080 int_x86_avx_maskstore_pd,
8081 int_x86_avx_maskstore_pd_256>;
8083 //===----------------------------------------------------------------------===//
8084 // VPERMIL - Permute Single and Double Floating-Point Values
8086 multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
8087 RegisterClass RC, X86MemOperand x86memop_f,
8088 X86MemOperand x86memop_i, PatFrag i_frag,
8089 Intrinsic IntVar, ValueType vt> {
8090 def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
8091 (ins RC:$src1, RC:$src2),
8092 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8093 [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V,
8094 Sched<[WriteFShuffle]>;
8095 def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
8096 (ins RC:$src1, x86memop_i:$src2),
8097 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8098 [(set RC:$dst, (IntVar RC:$src1,
8099 (bitconvert (i_frag addr:$src2))))]>, VEX_4V,
8100 Sched<[WriteFShuffleLd, ReadAfterLd]>;
8102 def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
8103 (ins RC:$src1, u8imm:$src2),
8104 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8105 [(set RC:$dst, (vt (X86VPermilpi RC:$src1, (i8 imm:$src2))))]>, VEX,
8106 Sched<[WriteFShuffle]>;
8107 def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
8108 (ins x86memop_f:$src1, u8imm:$src2),
8109 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8110 [(set RC:$dst,
8111 (vt (X86VPermilpi (load addr:$src1), (i8 imm:$src2))))]>, VEX,
8112 Sched<[WriteFShuffleLd]>;
8113 }
8115 let ExeDomain = SSEPackedSingle in {
8116 defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
8117 loadv2i64, int_x86_avx_vpermilvar_ps, v4f32>;
8118 defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
8119 loadv4i64, int_x86_avx_vpermilvar_ps_256, v8f32>, VEX_L;
8121 let ExeDomain = SSEPackedDouble in {
8122 defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
8123 loadv2i64, int_x86_avx_vpermilvar_pd, v2f64>;
8124 defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
8125 loadv4i64, int_x86_avx_vpermilvar_pd_256, v4f64>, VEX_L;
8128 let Predicates = [HasAVX] in {
8129 def : Pat<(v8f32 (X86VPermilpv VR256:$src1, (v8i32 VR256:$src2))),
8130 (VPERMILPSYrr VR256:$src1, VR256:$src2)>;
8131 def : Pat<(v8f32 (X86VPermilpv VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
8132 (VPERMILPSYrm VR256:$src1, addr:$src2)>;
8133 def : Pat<(v4f64 (X86VPermilpv VR256:$src1, (v4i64 VR256:$src2))),
8134 (VPERMILPDYrr VR256:$src1, VR256:$src2)>;
8135 def : Pat<(v4f64 (X86VPermilpv VR256:$src1, (loadv4i64 addr:$src2))),
8136 (VPERMILPDYrm VR256:$src1, addr:$src2)>;
8138 def : Pat<(v8i32 (X86VPermilpi VR256:$src1, (i8 imm:$imm))),
8139 (VPERMILPSYri VR256:$src1, imm:$imm)>;
8140 def : Pat<(v4i64 (X86VPermilpi VR256:$src1, (i8 imm:$imm))),
8141 (VPERMILPDYri VR256:$src1, imm:$imm)>;
8142 def : Pat<(v8i32 (X86VPermilpi (bc_v8i32 (loadv4i64 addr:$src1)),
8143 (i8 imm:$imm))),
8144 (VPERMILPSYmi addr:$src1, imm:$imm)>;
8145 def : Pat<(v4i64 (X86VPermilpi (loadv4i64 addr:$src1), (i8 imm:$imm))),
8146 (VPERMILPDYmi addr:$src1, imm:$imm)>;
8148 def : Pat<(v4f32 (X86VPermilpv VR128:$src1, (v4i32 VR128:$src2))),
8149 (VPERMILPSrr VR128:$src1, VR128:$src2)>;
8150 def : Pat<(v4f32 (X86VPermilpv VR128:$src1, (bc_v4i32 (loadv2i64 addr:$src2)))),
8151 (VPERMILPSrm VR128:$src1, addr:$src2)>;
8152 def : Pat<(v2f64 (X86VPermilpv VR128:$src1, (v2i64 VR128:$src2))),
8153 (VPERMILPDrr VR128:$src1, VR128:$src2)>;
8154 def : Pat<(v2f64 (X86VPermilpv VR128:$src1, (loadv2i64 addr:$src2))),
8155 (VPERMILPDrm VR128:$src1, addr:$src2)>;
8157 def : Pat<(v2i64 (X86VPermilpi VR128:$src1, (i8 imm:$imm))),
8158 (VPERMILPDri VR128:$src1, imm:$imm)>;
8159 def : Pat<(v2i64 (X86VPermilpi (loadv2i64 addr:$src1), (i8 imm:$imm))),
8160 (VPERMILPDmi addr:$src1, imm:$imm)>;
8163 //===----------------------------------------------------------------------===//
8164 // VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
8166 let ExeDomain = SSEPackedSingle in {
8167 def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
8168 (ins VR256:$src1, VR256:$src2, u8imm:$src3),
8169 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8170 [(set VR256:$dst, (v8f32 (X86VPerm2x128 VR256:$src1, VR256:$src2,
8171 (i8 imm:$src3))))]>, VEX_4V, VEX_L,
8172 Sched<[WriteFShuffle]>;
8173 def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
8174 (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
8175 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8176 [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv8f32 addr:$src2),
8177 (i8 imm:$src3)))]>, VEX_4V, VEX_L,
8178 Sched<[WriteFShuffleLd, ReadAfterLd]>;
8181 let Predicates = [HasAVX] in {
8182 def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8183 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8184 def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1,
8185 (loadv4f64 addr:$src2), (i8 imm:$imm))),
8186 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
8189 let Predicates = [HasAVX1Only] in {
8190 def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8191 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8192 def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8193 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8194 def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8195 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8196 def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8197 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8199 def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1,
8200 (bc_v8i32 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
8201 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
8202 def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1,
8203 (loadv4i64 addr:$src2), (i8 imm:$imm))),
8204 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
8205 def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1,
8206 (bc_v32i8 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
8207 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
8208 def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
8209 (bc_v16i16 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
8210 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
8211 }
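// vperm2i128 requires AVX2, so under AVX1 the 256-bit integer shuffles above
// are matched to the floating-point vperm2f128 instead.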
8213 //===----------------------------------------------------------------------===//
8214 // VZERO - Zero YMM registers
8216 let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
8217 YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
8218 // Zero All YMM registers
8219 def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
8220 [(int_x86_avx_vzeroall)]>, PS, VEX, VEX_L, Requires<[HasAVX]>;
8222 // Zero Upper bits of YMM registers
8223 def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
8224 [(int_x86_avx_vzeroupper)]>, PS, VEX, Requires<[HasAVX]>;
8225 }
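// vzeroall/vzeroupper clear the upper YMM state and are used to avoid the
// penalty when mixing VEX-encoded and legacy SSE code.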
8227 //===----------------------------------------------------------------------===//
8228 // Half precision conversion instructions
8229 //===----------------------------------------------------------------------===//
8230 multiclass f16c_ph2ps<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
8231 def rr : I<0x13, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
8232 "vcvtph2ps\t{$src, $dst|$dst, $src}",
8233 [(set RC:$dst, (Int VR128:$src))]>,
8234 T8PD, VEX, Sched<[WriteCvtF2F]>;
8235 let hasSideEffects = 0, mayLoad = 1 in
8236 def rm : I<0x13, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
8237 "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8PD, VEX,
8238 Sched<[WriteCvtF2FLd]>;
8241 multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
8242 def rr : Ii8<0x1D, MRMDestReg, (outs VR128:$dst),
8243 (ins RC:$src1, i32u8imm:$src2),
8244 "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8245 [(set VR128:$dst, (Int RC:$src1, imm:$src2))]>,
8246 TAPD, VEX, Sched<[WriteCvtF2F]>;
8247 let hasSideEffects = 0, mayStore = 1,
8248 SchedRW = [WriteCvtF2FLd, WriteRMW] in
8249 def mr : Ii8<0x1D, MRMDestMem, (outs),
8250 (ins x86memop:$dst, RC:$src1, i32u8imm:$src2),
8251 "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
8255 let Predicates = [HasF16C] in {
8256 defm VCVTPH2PS : f16c_ph2ps<VR128, f64mem, int_x86_vcvtph2ps_128>;
8257 defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, int_x86_vcvtph2ps_256>, VEX_L;
8258 defm VCVTPS2PH : f16c_ps2ph<VR128, f64mem, int_x86_vcvtps2ph_128>;
8259 defm VCVTPS2PHY : f16c_ps2ph<VR256, f128mem, int_x86_vcvtps2ph_256>, VEX_L;
8261 // Pattern match vcvtph2ps of a scalar i64 load.
8262 def : Pat<(int_x86_vcvtph2ps_128 (vzmovl_v2i64 addr:$src)),
8263 (VCVTPH2PSrm addr:$src)>;
8264 def : Pat<(int_x86_vcvtph2ps_128 (vzload_v2i64 addr:$src)),
8265 (VCVTPH2PSrm addr:$src)>;
8268 // Patterns for matching conversions from float to half-float and vice versa.
8269 let Predicates = [HasF16C] in {
8270 def : Pat<(fp_to_f16 FR32:$src),
8271 (i16 (EXTRACT_SUBREG (VMOVPDI2DIrr (VCVTPS2PHrr
8272 (COPY_TO_REGCLASS FR32:$src, VR128), 0)), sub_16bit))>;
8274 def : Pat<(f16_to_fp GR16:$src),
8275 (f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
8276 (COPY_TO_REGCLASS (MOVSX32rr16 GR16:$src), VR128)), FR32)) >;
8278 def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32:$src))),
8279 (f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
8280 (VCVTPS2PHrr (COPY_TO_REGCLASS FR32:$src, VR128), 0)), FR32)) >;
8281 }
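// These patterns lower the scalar fp16 conversions through the packed
// vcvtps2ph/vcvtph2ps instructions; immediate 0 requests round-to-nearest-even.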
8283 //===----------------------------------------------------------------------===//
8284 // AVX2 Instructions
8285 //===----------------------------------------------------------------------===//
8287 /// AVX2_binop_rmi_int - AVX2 binary operator with 8-bit immediate
8288 multiclass AVX2_binop_rmi_int<bits<8> opc, string OpcodeStr,
8289 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
8290 X86MemOperand x86memop> {
8291 let isCommutable = 1 in
8292 def rri : AVX2AIi8<opc, MRMSrcReg, (outs RC:$dst),
8293 (ins RC:$src1, RC:$src2, u8imm:$src3),
8294 !strconcat(OpcodeStr,
8295 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
8296 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
8297 Sched<[WriteBlend]>, VEX_4V;
8298 def rmi : AVX2AIi8<opc, MRMSrcMem, (outs RC:$dst),
8299 (ins RC:$src1, x86memop:$src2, u8imm:$src3),
8300 !strconcat(OpcodeStr,
8301 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
8304 (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
8305 Sched<[WriteBlendLd, ReadAfterLd]>, VEX_4V;
8308 defm VPBLENDD : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_128,
8309 VR128, loadv2i64, i128mem>;
8310 defm VPBLENDDY : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_256,
8311 VR256, loadv4i64, i256mem>, VEX_L;
8313 def : Pat<(v4i32 (X86Blendi (v4i32 VR128:$src1), (v4i32 VR128:$src2),
8314 (i8 imm:$mask))),
8315 (VPBLENDDrri VR128:$src1, VR128:$src2, imm:$mask)>;
8316 def : Pat<(v8i32 (X86Blendi (v8i32 VR256:$src1), (v8i32 VR256:$src2),
8317 (i8 imm:$mask))),
8318 (VPBLENDDYrri VR256:$src1, VR256:$src2, imm:$mask)>;
//===----------------------------------------------------------------------===//
// VPBROADCAST - Load from memory and broadcast to all elements of the
//               destination operand
multiclass avx2_broadcast<bits<8> opc, string OpcodeStr,
                          X86MemOperand x86memop, PatFrag ld_frag,
                          Intrinsic Int128, Intrinsic Int256> {
  def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR128:$dst, (Int128 VR128:$src))]>,
                  Sched<[WriteShuffle]>, VEX;
  def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst), (ins x86memop:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR128:$dst,
                    (Int128 (scalar_to_vector (ld_frag addr:$src))))]>,
                  Sched<[WriteLoad]>, VEX;
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR256:$dst, (Int256 VR128:$src))]>,
                   Sched<[WriteShuffle256]>, VEX, VEX_L;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst), (ins x86memop:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR256:$dst,
                     (Int256 (scalar_to_vector (ld_frag addr:$src))))]>,
                   Sched<[WriteLoad]>, VEX, VEX_L;
}
defm VPBROADCASTB : avx2_broadcast<0x78, "vpbroadcastb", i8mem, loadi8,
                                   int_x86_avx2_pbroadcastb_128,
                                   int_x86_avx2_pbroadcastb_256>;
defm VPBROADCASTW : avx2_broadcast<0x79, "vpbroadcastw", i16mem, loadi16,
                                   int_x86_avx2_pbroadcastw_128,
                                   int_x86_avx2_pbroadcastw_256>;
defm VPBROADCASTD : avx2_broadcast<0x58, "vpbroadcastd", i32mem, loadi32,
                                   int_x86_avx2_pbroadcastd_128,
                                   int_x86_avx2_pbroadcastd_256>;
defm VPBROADCASTQ : avx2_broadcast<0x59, "vpbroadcastq", i64mem, loadi64,
                                   int_x86_avx2_pbroadcastq_128,
                                   int_x86_avx2_pbroadcastq_256>;
let Predicates = [HasAVX2] in {
  def : Pat<(v16i8 (X86VBroadcast (loadi8 addr:$src))),
            (VPBROADCASTBrm addr:$src)>;
  def : Pat<(v32i8 (X86VBroadcast (loadi8 addr:$src))),
            (VPBROADCASTBYrm addr:$src)>;
  def : Pat<(v8i16 (X86VBroadcast (loadi16 addr:$src))),
            (VPBROADCASTWrm addr:$src)>;
  def : Pat<(v16i16 (X86VBroadcast (loadi16 addr:$src))),
            (VPBROADCASTWYrm addr:$src)>;
  def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
            (VPBROADCASTDrm addr:$src)>;
  def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
            (VPBROADCASTDYrm addr:$src)>;
  def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
            (VPBROADCASTQrm addr:$src)>;
  def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
            (VPBROADCASTQYrm addr:$src)>;

  def : Pat<(v16i8 (X86VBroadcast (v16i8 VR128:$src))),
            (VPBROADCASTBrr VR128:$src)>;
  def : Pat<(v32i8 (X86VBroadcast (v16i8 VR128:$src))),
            (VPBROADCASTBYrr VR128:$src)>;
  def : Pat<(v8i16 (X86VBroadcast (v8i16 VR128:$src))),
            (VPBROADCASTWrr VR128:$src)>;
  def : Pat<(v16i16 (X86VBroadcast (v8i16 VR128:$src))),
            (VPBROADCASTWYrr VR128:$src)>;
  def : Pat<(v4i32 (X86VBroadcast (v4i32 VR128:$src))),
            (VPBROADCASTDrr VR128:$src)>;
  def : Pat<(v8i32 (X86VBroadcast (v4i32 VR128:$src))),
            (VPBROADCASTDYrr VR128:$src)>;
  def : Pat<(v2i64 (X86VBroadcast (v2i64 VR128:$src))),
            (VPBROADCASTQrr VR128:$src)>;
  def : Pat<(v4i64 (X86VBroadcast (v2i64 VR128:$src))),
            (VPBROADCASTQYrr VR128:$src)>;
  def : Pat<(v4f32 (X86VBroadcast (v4f32 VR128:$src))),
            (VBROADCASTSSrr VR128:$src)>;
  def : Pat<(v8f32 (X86VBroadcast (v4f32 VR128:$src))),
            (VBROADCASTSSYrr VR128:$src)>;
  def : Pat<(v2f64 (X86VBroadcast (v2f64 VR128:$src))),
            (VPBROADCASTQrr VR128:$src)>;
  def : Pat<(v4f64 (X86VBroadcast (v2f64 VR128:$src))),
            (VBROADCASTSDYrr VR128:$src)>;

  // Provide aliases for broadcast from the same register class that
  // automatically do the extract.
  def : Pat<(v32i8 (X86VBroadcast (v32i8 VR256:$src))),
            (VPBROADCASTBYrr (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src),
                                                    sub_xmm)))>;
  def : Pat<(v16i16 (X86VBroadcast (v16i16 VR256:$src))),
            (VPBROADCASTWYrr (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src),
                                                    sub_xmm)))>;
  def : Pat<(v8i32 (X86VBroadcast (v8i32 VR256:$src))),
            (VPBROADCASTDYrr (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src),
                                                    sub_xmm)))>;
  def : Pat<(v4i64 (X86VBroadcast (v4i64 VR256:$src))),
            (VPBROADCASTQYrr (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src),
                                                    sub_xmm)))>;
  def : Pat<(v8f32 (X86VBroadcast (v8f32 VR256:$src))),
            (VBROADCASTSSYrr (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src),
                                                    sub_xmm)))>;
  def : Pat<(v4f64 (X86VBroadcast (v4f64 VR256:$src))),
            (VBROADCASTSDYrr (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src),
                                                    sub_xmm)))>;

  // Provide a fallback in case the load node that is used in the patterns above
  // is used by additional users, which prevents the pattern selection.
  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSYrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
              (VBROADCASTSDYrr (COPY_TO_REGCLASS FR64:$src, VR128))>;

    def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
              (VBROADCASTSSrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
    def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
              (VBROADCASTSSYrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
    def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
              (VBROADCASTSDYrr (COPY_TO_REGCLASS GR64:$src, VR128))>;

    def : Pat<(v16i8 (X86VBroadcast GR8:$src)),
              (VPBROADCASTBrr (COPY_TO_REGCLASS
                               (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
                               VR128))>;
    def : Pat<(v32i8 (X86VBroadcast GR8:$src)),
              (VPBROADCASTBYrr (COPY_TO_REGCLASS
                                (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
                                VR128))>;

    def : Pat<(v8i16 (X86VBroadcast GR16:$src)),
              (VPBROADCASTWrr (COPY_TO_REGCLASS
                               (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
                               VR128))>;
    def : Pat<(v16i16 (X86VBroadcast GR16:$src)),
              (VPBROADCASTWYrr (COPY_TO_REGCLASS
                                (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
                                VR128))>;

    // The patterns for VPBROADCASTD are not needed because they would match
    // the exact same thing as the VBROADCASTSS patterns.
    def : Pat<(v2i64 (X86VBroadcast GR64:$src)),
              (VPBROADCASTQrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
    // The v4i64 pattern is not needed because VBROADCASTSDYrr already matches.
  }
}
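
// Illustrative sketch (not part of this file): the register/memory broadcast
// forms selected by the patterns above, driven from C. Assumes <immintrin.h>
// and an AVX2 target.
//
//   #include <immintrin.h>
//   __m256i splat_dword_from_mem(const int *p) {
//     // Expected to select the VPBROADCASTDYrm form (vpbroadcastd from memory).
//     return _mm256_set1_epi32(*p);
//   }
//   __m256i splat_byte_from_reg(__m128i v) {
//     // vpbroadcastb ymm, xmm -- the VPBROADCASTBYrr register form.
//     return _mm256_broadcastb_epi8(v);
//   }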
// AVX1 broadcast patterns
let Predicates = [HasAVX1Only] in {
  def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
            (VBROADCASTSSYrm addr:$src)>;
  def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
            (VBROADCASTSDYrm addr:$src)>;
  def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
            (VBROADCASTSSrm addr:$src)>;
}
let Predicates = [HasAVX] in {
  // Provide a fallback in case the load node that is used in the patterns above
  // is used by additional users, which prevents the pattern selection.
  let AddedComplexity = 20 in {
    // 128-bit broadcasts:
    def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
              (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0)>;
    def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), 1)>;
    def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), 1)>;

    def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
              (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0)>;
    def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), 1)>;
    def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), 1)>;
  }

  def : Pat<(v2f64 (X86VBroadcast f64:$src)),
            (VMOVDDUPrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
}
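
// Illustrative note (not part of this file): on an AVX1-only target the same
// C-level splat cannot use a register-source vbroadcast, so the fallback
// patterns above emit a shuffle of the scalar plus a 128-bit insert instead.
//
//   #include <immintrin.h>
//   __m256 splat_ps(float x) {
//     // Without AVX2 this is expected to lower via the v8f32 X86VBroadcast
//     // pattern above: vpshufd/vshufps of the scalar, then vinsertf128.
//     return _mm256_set1_ps(x);
//   }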
//===----------------------------------------------------------------------===//
// VPERM - Permute instructions
multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                     ValueType OpVT, X86FoldableSchedWrite Sched> {
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
                   (ins VR256:$src1, VR256:$src2),
                   !strconcat(OpcodeStr,
                       "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR256:$dst,
                     (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>,
                   Sched<[Sched]>, VEX_4V, VEX_L;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
                   (ins VR256:$src1, i256mem:$src2),
                   !strconcat(OpcodeStr,
                       "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR256:$dst,
                     (OpVT (X86VPermv VR256:$src1,
                            (bitconvert (mem_frag addr:$src2)))))]>,
                   Sched<[Sched.Folded, ReadAfterLd]>, VEX_4V, VEX_L;
}

defm VPERMD : avx2_perm<0x36, "vpermd", loadv4i64, v8i32, WriteShuffle256>;
let ExeDomain = SSEPackedSingle in
defm VPERMPS : avx2_perm<0x16, "vpermps", loadv8f32, v8f32, WriteFShuffle256>;
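
// Illustrative sketch (not part of this file): a cross-lane variable permute
// using the VPERMD/VPERMPS forms defined above. Assumes <immintrin.h>, AVX2.
//
//   #include <immintrin.h>
//   __m256i reverse_dwords(__m256i v) {
//     const __m256i idx = _mm256_setr_epi32(7, 6, 5, 4, 3, 2, 1, 0);
//     return _mm256_permutevar8x32_epi32(v, idx);  // vpermd
//   }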
multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                         ValueType OpVT, X86FoldableSchedWrite Sched> {
  def Yri : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst),
                     (ins VR256:$src1, u8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>,
                     Sched<[Sched]>, VEX, VEX_L;
  def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
                     (ins i256mem:$src1, u8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermi (mem_frag addr:$src1),
                              (i8 imm:$src2))))]>,
                     Sched<[Sched.Folded, ReadAfterLd]>, VEX, VEX_L;
}

defm VPERMQ : avx2_perm_imm<0x00, "vpermq", loadv4i64, v4i64,
                            WriteShuffle256>, VEX_W;
let ExeDomain = SSEPackedDouble in
defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", loadv4f64, v4f64,
                             WriteFShuffle256>, VEX_W;
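
// Illustrative sketch (not part of this file): the immediate-controlled 64-bit
// element permute (VPERMQ/VPERMPD) from C. Assumes <immintrin.h>, AVX2.
//
//   #include <immintrin.h>
//   __m256i swap_qword_halves(__m256i v) {
//     // vpermq $0x4e, v -> selects qwords 2, 3, 0, 1.
//     return _mm256_permute4x64_epi64(v, 0x4e);
//   }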
//===----------------------------------------------------------------------===//
// VPERM2I128 - Permute Integer Values in 128-bit chunks
def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, u8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2,
                            (i8 imm:$src3))))]>, Sched<[WriteShuffle256]>,
          VEX_4V, VEX_L;
def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv4i64 addr:$src2),
                            (i8 imm:$src3)))]>,
          Sched<[WriteShuffle256Ld, ReadAfterLd]>, VEX_4V, VEX_L;

let Predicates = [HasAVX2] in {
  def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;

  def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, (bc_v32i8 (loadv4i64 addr:$src2)),
                    (i8 imm:$imm))),
            (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
                    (bc_v16i16 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
            (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)),
                    (i8 imm:$imm))),
            (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
}
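
// Illustrative sketch (not part of this file): a 128-bit-lane permute using the
// VPERM2I128 form above. Assumes <immintrin.h>, AVX2.
//
//   #include <immintrin.h>
//   __m256i swap_lanes(__m256i v) {
//     // vperm2i128 $0x01, v, v -> the two 128-bit lanes of v swapped.
//     return _mm256_permute2x128_si256(v, v, 0x01);
//   }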
//===----------------------------------------------------------------------===//
// VINSERTI128 - Insert packed integer values
let hasSideEffects = 0 in {
def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, u8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteShuffle256]>, VEX_4V, VEX_L;

def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, i128mem:$src2, u8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteShuffle256Ld, ReadAfterLd]>, VEX_4V, VEX_L;
}
let Predicates = [HasAVX2] in {
def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;

def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
                                  (iPTR imm)),
          (VINSERTI128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1),
                                  (bc_v4i32 (loadv2i64 addr:$src2)),
                                  (iPTR imm)),
          (VINSERTI128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1),
                                  (bc_v16i8 (loadv2i64 addr:$src2)),
                                  (iPTR imm)),
          (VINSERTI128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1),
                                  (bc_v8i16 (loadv2i64 addr:$src2)),
                                  (iPTR imm)),
          (VINSERTI128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
}
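
// Illustrative sketch (not part of this file): the vinsert128_insert patterns
// above correspond to the C-level lane insert. Assumes <immintrin.h>, AVX2.
//
//   #include <immintrin.h>
//   __m256i set_high_lane(__m256i v, __m128i hi) {
//     // vinserti128 $1, hi, v -> replaces the upper 128 bits of v.
//     return _mm256_inserti128_si256(v, hi, 1);
//   }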
//===----------------------------------------------------------------------===//
// VEXTRACTI128 - Extract packed integer values
def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, u8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          [(set VR128:$dst,
            (int_x86_avx2_vextracti128 VR256:$src1, imm:$src2))]>,
          Sched<[WriteShuffle256]>, VEX, VEX_L;
let hasSideEffects = 0, mayStore = 1 in
def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
          (ins i128mem:$dst, VR256:$src1, u8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
          Sched<[WriteStore]>, VEX, VEX_L;

let Predicates = [HasAVX2] in {
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v2i64 (VEXTRACTI128rr
                  (v4i64 VR256:$src1),
                  (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v4i32 (VEXTRACTI128rr
                  (v8i32 VR256:$src1),
                  (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v8i16 (VEXTRACTI128rr
                  (v16i16 VR256:$src1),
                  (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v16i8 (VEXTRACTI128rr
                  (v32i8 VR256:$src1),
                  (EXTRACT_get_vextract128_imm VR128:$ext)))>;

def : Pat<(store (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
                                                  (iPTR imm))), addr:$dst),
          (VEXTRACTI128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(store (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
                                                  (iPTR imm))), addr:$dst),
          (VEXTRACTI128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(store (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
                                                  (iPTR imm))), addr:$dst),
          (VEXTRACTI128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(store (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
                                                  (iPTR imm))), addr:$dst),
          (VEXTRACTI128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
}
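
// Illustrative sketch (not part of this file): the extract patterns above from
// C, including the store form matched by the second group. Assumes AVX2.
//
//   #include <immintrin.h>
//   __m128i get_high_lane(__m256i v) {
//     return _mm256_extracti128_si256(v, 1);                // vextracti128, reg form
//   }
//   void store_high_lane(__m128i *p, __m256i v) {
//     // The store may fold into the memory form of vextracti128.
//     _mm_store_si128(p, _mm256_extracti128_si256(v, 1));
//   }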
//===----------------------------------------------------------------------===//
// VPMASKMOV - Conditional SIMD Integer Packed Loads and Stores
multiclass avx2_pmovmask<string OpcodeStr,
                         Intrinsic IntLd128, Intrinsic IntLd256,
                         Intrinsic IntSt128, Intrinsic IntSt256> {
  def rm  : AVX28I<0x8c, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd128 addr:$src2, VR128:$src1))]>, VEX_4V;
  def Yrm : AVX28I<0x8c, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V, VEX_L;
  def mr  : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt128 addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
}

defm VPMASKMOVD : avx2_pmovmask<"vpmaskmovd",
                                int_x86_avx2_maskload_d,
                                int_x86_avx2_maskload_d_256,
                                int_x86_avx2_maskstore_d,
                                int_x86_avx2_maskstore_d_256>;
defm VPMASKMOVQ : avx2_pmovmask<"vpmaskmovq",
                                int_x86_avx2_maskload_q,
                                int_x86_avx2_maskload_q_256,
                                int_x86_avx2_maskstore_q,
                                int_x86_avx2_maskstore_q_256>, VEX_W;

def: Pat<(masked_store addr:$ptr, (v8i32 VR256:$mask), (v8f32 VR256:$src)),
         (VMASKMOVPSYmr addr:$ptr, VR256:$mask, VR256:$src)>;

def: Pat<(masked_store addr:$ptr, (v8i32 VR256:$mask), (v8i32 VR256:$src)),
         (VPMASKMOVDYmr addr:$ptr, VR256:$mask, VR256:$src)>;

def: Pat<(masked_store addr:$ptr, (v4i32 VR128:$mask), (v4f32 VR128:$src)),
         (VMASKMOVPSmr addr:$ptr, VR128:$mask, VR128:$src)>;

def: Pat<(masked_store addr:$ptr, (v4i32 VR128:$mask), (v4i32 VR128:$src)),
         (VPMASKMOVDmr addr:$ptr, VR128:$mask, VR128:$src)>;

def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask), undef)),
         (VMASKMOVPSYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask),
                 (bc_v8f32 (v8i32 immAllZerosV)))),
         (VMASKMOVPSYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8f32 VR256:$src0))),
         (VBLENDVPSYrr VR256:$src0, (VMASKMOVPSYrm VR256:$mask, addr:$ptr),
                       VR256:$mask)>;

def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), undef)),
         (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8i32 immAllZerosV))),
         (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8i32 VR256:$src0))),
         (VBLENDVPSYrr VR256:$src0, (VPMASKMOVDYrm VR256:$mask, addr:$ptr),
                       VR256:$mask)>;

def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask), undef)),
         (VMASKMOVPSrm VR128:$mask, addr:$ptr)>;

def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask),
                 (bc_v4f32 (v4i32 immAllZerosV)))),
         (VMASKMOVPSrm VR128:$mask, addr:$ptr)>;

def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4f32 VR128:$src0))),
         (VBLENDVPSrr VR128:$src0, (VMASKMOVPSrm VR128:$mask, addr:$ptr),
                      VR128:$mask)>;

def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), undef)),
         (VPMASKMOVDrm VR128:$mask, addr:$ptr)>;

def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4i32 immAllZerosV))),
         (VPMASKMOVDrm VR128:$mask, addr:$ptr)>;

def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4i32 VR128:$src0))),
         (VBLENDVPSrr VR128:$src0, (VPMASKMOVDrm VR128:$mask, addr:$ptr),
                      VR128:$mask)>;

def: Pat<(masked_store addr:$ptr, (v4i64 VR256:$mask), (v4f64 VR256:$src)),
         (VMASKMOVPDYmr addr:$ptr, VR256:$mask, VR256:$src)>;

def: Pat<(masked_store addr:$ptr, (v4i64 VR256:$mask), (v4i64 VR256:$src)),
         (VPMASKMOVQYmr addr:$ptr, VR256:$mask, VR256:$src)>;

def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask), undef)),
         (VMASKMOVPDYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask),
                 (v4f64 immAllZerosV))),
         (VMASKMOVPDYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask), (v4f64 VR256:$src0))),
         (VBLENDVPDYrr VR256:$src0, (VMASKMOVPDYrm VR256:$mask, addr:$ptr),
                       VR256:$mask)>;

def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask), undef)),
         (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask),
                 (bc_v4i64 (v8i32 immAllZerosV)))),
         (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask), (v4i64 VR256:$src0))),
         (VBLENDVPDYrr VR256:$src0, (VPMASKMOVQYrm VR256:$mask, addr:$ptr),
                       VR256:$mask)>;

def: Pat<(masked_store addr:$ptr, (v2i64 VR128:$mask), (v2f64 VR128:$src)),
         (VMASKMOVPDmr addr:$ptr, VR128:$mask, VR128:$src)>;

def: Pat<(masked_store addr:$ptr, (v2i64 VR128:$mask), (v2i64 VR128:$src)),
         (VPMASKMOVQmr addr:$ptr, VR128:$mask, VR128:$src)>;

def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask), undef)),
         (VMASKMOVPDrm VR128:$mask, addr:$ptr)>;

def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask),
                 (v2f64 immAllZerosV))),
         (VMASKMOVPDrm VR128:$mask, addr:$ptr)>;

def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask), (v2f64 VR128:$src0))),
         (VBLENDVPDrr VR128:$src0, (VMASKMOVPDrm VR128:$mask, addr:$ptr),
                      VR128:$mask)>;

def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask), undef)),
         (VPMASKMOVQrm VR128:$mask, addr:$ptr)>;

def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask),
                 (bc_v2i64 (v4i32 immAllZerosV)))),
         (VPMASKMOVQrm VR128:$mask, addr:$ptr)>;

def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask), (v2i64 VR128:$src0))),
         (VBLENDVPDrr VR128:$src0, (VPMASKMOVQrm VR128:$mask, addr:$ptr),
                      VR128:$mask)>;
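
// Illustrative sketch (not part of this file): the masked_load/masked_store
// patterns above, driven from C. A set sign bit in each mask element selects
// that element. Assumes <immintrin.h>, AVX2.
//
//   #include <immintrin.h>
//   __m256i load_selected(const int *p, __m256i mask) {
//     // vpmaskmovd: lanes with the mask MSB clear read as zero, matching the
//     // zero/undef pass-through patterns above.
//     return _mm256_maskload_epi32(p, mask);
//   }
//   void store_selected(int *p, __m256i mask, __m256i v) {
//     _mm256_maskstore_epi32(p, mask, v);          // vpmaskmovd, store form
//   }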
//===----------------------------------------------------------------------===//
// Variable Bit Shifts
multiclass avx2_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType vt128, ValueType vt256> {
  def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst),
             (ins VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1, (vt128 VR128:$src2))))]>,
             VEX_4V, Sched<[WriteVarVecShift]>;
  def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1,
                       (vt128 (bitconvert (loadv2i64 addr:$src2))))))]>,
             VEX_4V, Sched<[WriteVarVecShiftLd, ReadAfterLd]>;
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
             (ins VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1, (vt256 VR256:$src2))))]>,
             VEX_4V, VEX_L, Sched<[WriteVarVecShift]>;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1,
                       (vt256 (bitconvert (loadv4i64 addr:$src2))))))]>,
             VEX_4V, VEX_L, Sched<[WriteVarVecShiftLd, ReadAfterLd]>;
}

defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, v4i32, v8i32>;
defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", shl, v2i64, v4i64>, VEX_W;
defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, v4i32, v8i32>;
defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, v2i64, v4i64>, VEX_W;
defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, v4i32, v8i32>;
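
// Illustrative sketch (not part of this file): per-element variable shifts as
// selected by the shl/srl/sra patterns above. Assumes <immintrin.h>, AVX2.
//
//   #include <immintrin.h>
//   __m256i shift_each(__m256i v, __m256i counts) {
//     // vpsllvd: an independent shift count for each dword lane.
//     return _mm256_sllv_epi32(v, counts);
//   }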
//===----------------------------------------------------------------------===//
// VGATHER - GATHER Operations
multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,
                       X86MemOperand memop128, X86MemOperand memop256> {
  def rm  : AVX28I<opc, MRMSrcMem, (outs VR128:$dst, VR128:$mask_wb),
            (ins VR128:$src1, memop128:$src2, VR128:$mask),
            !strconcat(OpcodeStr,
              "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            []>, VEX_4VOp3;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs RC256:$dst, RC256:$mask_wb),
            (ins RC256:$src1, memop256:$src2, RC256:$mask),
            !strconcat(OpcodeStr,
              "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            []>, VEX_4VOp3, VEX_L;
}

let mayLoad = 1, Constraints
  = "@earlyclobber $dst,@earlyclobber $mask_wb, $src1 = $dst, $mask = $mask_wb"
  in {
  defm VPGATHERDQ : avx2_gather<0x90, "vpgatherdq", VR256, vx64mem, vx64mem>, VEX_W;
  defm VPGATHERQQ : avx2_gather<0x91, "vpgatherqq", VR256, vx64mem, vy64mem>, VEX_W;
  defm VPGATHERDD : avx2_gather<0x90, "vpgatherdd", VR256, vx32mem, vy32mem>;
  defm VPGATHERQD : avx2_gather<0x91, "vpgatherqd", VR128, vx32mem, vy32mem>;

  let ExeDomain = SSEPackedDouble in {
    defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx64mem, vx64mem>, VEX_W;
    defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx64mem, vy64mem>, VEX_W;
  }

  let ExeDomain = SSEPackedSingle in {
    defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx32mem, vy32mem>;
    defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", VR128, vx32mem, vy32mem>;
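
// Illustrative sketch (not part of this file): a dword gather using the
// VPGATHERDD form defined above. Assumes <immintrin.h>, AVX2; the scale
// argument must be 1, 2, 4, or 8.
//
//   #include <immintrin.h>
//   __m256i gather8(const int *base, __m256i idx) {
//     // vpgatherdd: the implicit mask is all-ones here and is written back
//     // (clobbered), which the @earlyclobber constraint above reflects.
//     return _mm256_i32gather_epi32(base, idx, 4);
//   }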