//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//
def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;

def X86fmin    : SDNode<"X86ISD::FMIN",   SDTFPBinOp>;
def X86fmax    : SDNode<"X86ISD::FMAX",   SDTFPBinOp>;
def X86fand    : SDNode<"X86ISD::FAND",   SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86for     : SDNode<"X86ISD::FOR",    SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fxor    : SDNode<"X86ISD::FXOR",   SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt  : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
def X86frcp    : SDNode<"X86ISD::FRCP",   SDTFPUnaryOp>;
def X86fsrl    : SDNode<"X86ISD::FSRL",   SDTX86FPShiftOp>;
def X86comi    : SDNode<"X86ISD::COMI",   SDTX86CmpTest>;
def X86ucomi   : SDNode<"X86ISD::UCOMI",  SDTX86CmpTest>;
def X86pextrb  : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw  : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb  : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw  : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insrtps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, f32>, SDTCisPtrTy<3>]>>;
def X86zvmovl  : SDNode<"X86ISD::ZEXT_VMOVL", SDTUnaryOp>;

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 4, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
def sse_load_f64 : ComplexPattern<v2f64, 4, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
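
// For illustration, the intrinsic reg+mem forms below pair these with the
// ssmem/sdmem operands; a pattern of roughly this shape (a sketch, mirroring
// the SSrm_Int defs later in this file):
//   [(set VR128:$dst, (int_x86_sse_add_ss VR128:$src1, sse_load_f32:$src2))]
// lets the selector fold a scalar load into the 'ss' instruction, while
// SelectScalarSSELoad matches only loads with the zeroing behavior described
// above.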

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// Like 'store', but always requires vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

// Like 'load', but always requires vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 16;
  return false;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr), (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr), (f64 (alignedload node:$ptr))>;
def alignedloadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (alignedload node:$ptr))>;
def alignedloadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (alignedload node:$ptr))>;

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others.
// FIXME: Actually implement support for targets that don't require the
// alignment. This probably wants a subtarget predicate.
def memop : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 16;
  return false;
}]>;
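
// One possible shape for that FIXME (a sketch only, not wired up here): gate
// the alignment check on a subtarget query, e.g.
//   return LD->getAlignment() >= 16 ||
//          !Subtarget->requiresVectorAlignment();
// where requiresVectorAlignment() is a hypothetical accessor, so
// unaligned-tolerant targets could still fold the load.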

def memopfsf32 : PatFrag<(ops node:$ptr), (f32   (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64   (memop node:$ptr))>;
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;

// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 8;
  return false;
}]>;

def memopv8i8  : PatFrag<(ops node:$ptr), (v8i8  (memop64 node:$ptr))>;
def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;

def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
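
// These wrap a free bitcast so patterns can reinterpret a 128-bit value at a
// different element type without emitting any code. For example, the
// MOVLPSrm pattern below views a v2f64 scalar_to_vector as v4f32 via
// (bc_v4f32 (v2f64 ...)) before shuffling it.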

def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

def PSxLDQ_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getValue() >> 3);
}]>;
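
// The DAG expresses these shift amounts in bits, while the hardware
// immediates for the PSLLDQ/PSRLDQ-style byte shifts count bytes, so the
// transform divides by 8: a 32-bit shift becomes the byte immediate 4,
// which is why the xform is imm >> 3.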

// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;

// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;

def SSE_splat_mask : PatLeaf<(build_vector), [{
  return X86::isSplatMask(N);
}], SHUFFLE_get_shuf_imm>;
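
// A PatLeaf with an xform both recognizes a mask and rewrites it: the C++
// predicate accepts the build_vector shuffle mask, and the attached xform
// emits the matching 8-bit immediate. As a worked example (assuming the
// usual PSHUFD encoding of four 2-bit element indices), a splat of element 2
// gives the mask <2,2,2,2>, which packs to 0b10101010 = 0xAA.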

def SSE_splat_lo_mask : PatLeaf<(build_vector), [{
  return X86::isSplatLoMask(N);
}]>;

def MOVHLPS_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHLPSMask(N);
}]>;

def MOVHLPS_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHLPS_v_undef_Mask(N);
}]>;

def MOVHP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHPMask(N);
}]>;

def MOVLP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVLPMask(N);
}]>;

def MOVL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVLMask(N);
}]>;

def MOVSHDUP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVSHDUPMask(N);
}]>;

def MOVSLDUP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVSLDUPMask(N);
}]>;

def UNPCKL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKLMask(N);
}]>;

def UNPCKH_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKHMask(N);
}]>;

def UNPCKL_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKL_v_undef_Mask(N);
}]>;

def UNPCKH_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKH_v_undef_Mask(N);
}]>;

def PSHUFD_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFDMask(N);
}], SHUFFLE_get_shuf_imm>;

def PSHUFHW_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFHWMask(N);
}], SHUFFLE_get_pshufhw_imm>;

def PSHUFLW_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFLWMask(N);
}], SHUFFLE_get_pshuflw_imm>;

def SHUFP_unary_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFDMask(N);
}], SHUFFLE_get_shuf_imm>;

def SHUFP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isSHUFPMask(N);
}], SHUFFLE_get_shuf_imm>;

def PSHUFD_binary_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isSHUFPMask(N);
}], SHUFFLE_get_shuf_imm>;

//===----------------------------------------------------------------------===//
// SSE scalar FP Instructions
//===----------------------------------------------------------------------===//

// CMOV* - Used to implement the SSE SELECT DAG operation. These are expanded
// by the scheduler into a branch sequence.
let Uses = [EFLAGS], usesCustomDAGSchedInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V4F32 PSEUDO!",
                    [(set VR128:$dst,
                      (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2F64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2I64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
}

//===----------------------------------------------------------------------===//
// SSE1 Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
let neverHasSideEffects = 1 in
def MOVSSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(set FR32:$dst, (loadf32 addr:$src))]>;
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>;

// Conversion instructions
def CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def CVTSI2SSrr  : SSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                      "cvtsi2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (sint_to_fp GR32:$src))]>;
def CVTSI2SSrm  : SSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                      "cvtsi2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>;

// Match intrinsics which expect XMM operand(s).
def Int_CVTSS2SIrr : SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                         "cvtss2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>;
def Int_CVTSS2SIrm : SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                         "cvtss2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse_cvtss2si
                                           (load addr:$src)))]>;

// Match intrinsics which expect MM and XMM operand(s).
def Int_CVTPS2PIrr : PSI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvtps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtps2pi VR128:$src))]>;
def Int_CVTPS2PIrm : PSI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                         "cvtps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtps2pi
                                           (load addr:$src)))]>;
def Int_CVTTPS2PIrr: PSI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvttps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttps2pi VR128:$src))]>;
def Int_CVTTPS2PIrm: PSI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                         "cvttps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttps2pi
                                           (load addr:$src)))]>;
let Constraints = "$src1 = $dst" in {
  def Int_CVTPI2PSrr : PSI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, VR64:$src2),
                           "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
                                              VR64:$src2))]>;
  def Int_CVTPI2PSrm : PSI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                           "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
                                              (load addr:$src2)))]>;
}

// Aliases for intrinsics
def Int_CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si VR128:$src))]>;
def Int_CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si (load addr:$src)))]>;

let Constraints = "$src1 = $dst" in {
  def Int_CVTSI2SSrr : SSI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
                           "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                              GR32:$src2))]>;
  def Int_CVTSI2SSrm : SSI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
                           "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                              (loadi32 addr:$src2)))]>;
}

// Comparison instructions
let Constraints = "$src1 = $dst" in {
  let neverHasSideEffects = 1 in
  def CMPSSrr : SSIi8<0xC2, MRMSrcReg,
                      (outs FR32:$dst), (ins FR32:$src1, FR32:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
  let neverHasSideEffects = 1, mayLoad = 1 in
  def CMPSSrm : SSIi8<0xC2, MRMSrcMem,
                      (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
}

let Defs = [EFLAGS] in {
def UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2),
                   "ucomiss\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR32:$src1, FR32:$src2), (implicit EFLAGS)]>;
def UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2),
                   "ucomiss\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR32:$src1, (loadf32 addr:$src2)),
                    (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Aliases to match intrinsics which expect XMM operand(s).
let Constraints = "$src1 = $dst" in {
  def Int_CMPSSrr : SSIi8<0xC2, MRMSrcReg,
                          (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                          "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
                                             VR128:$src, imm:$cc))]>;
  def Int_CMPSSrm : SSIi8<0xC2, MRMSrcMem,
                          (outs VR128:$dst), (ins VR128:$src1, f32mem:$src, SSECC:$cc),
                          "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
                                             (load addr:$src), imm:$cc))]>;
}

let Defs = [EFLAGS] in {
def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs),
                       (ins VR128:$src1, VR128:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v4f32 VR128:$src1), VR128:$src2),
                        (implicit EFLAGS)]>;
def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs),
                       (ins VR128:$src1, f128mem:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v4f32 VR128:$src1), (load addr:$src2)),
                        (implicit EFLAGS)]>;

def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs),
                      (ins VR128:$src1, VR128:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v4f32 VR128:$src1), VR128:$src2),
                       (implicit EFLAGS)]>;
def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs),
                      (ins VR128:$src1, f128mem:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v4f32 VR128:$src1), (load addr:$src2)),
                       (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Aliases of packed SSE1 instructions for scalar use. These all have names
// that start with 'Fs'.

// Alias instructions that map fld0 to pxor for sse.
let isReMaterializable = 1 in
def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins),
                 "pxor\t$dst, $dst", [(set FR32:$dst, fp32imm0)]>,
               Requires<[HasSSE1]>, TB, OpSize;

// Alias instruction to do FR32 reg-to-reg copy using movaps. Upper bits are
// disregarded.
let neverHasSideEffects = 1 in
def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                     "movaps\t{$src, $dst|$dst, $src}", []>;

// Alias instruction to load FR32 from f128mem using movaps. Upper bits are
// disregarded.
let isSimpleLoad = 1 in
def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;

// Alias bitwise logical operations using SSE logical ops on packed FP values.
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in {
  def FsANDPSrr : PSI<0x54, MRMSrcReg, (outs FR32:$dst),
                      (ins FR32:$src1, FR32:$src2),
                      "andps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86fand FR32:$src1, FR32:$src2))]>;
  def FsORPSrr  : PSI<0x56, MRMSrcReg, (outs FR32:$dst),
                      (ins FR32:$src1, FR32:$src2),
                      "orps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86for FR32:$src1, FR32:$src2))]>;
  def FsXORPSrr : PSI<0x57, MRMSrcReg, (outs FR32:$dst),
                      (ins FR32:$src1, FR32:$src2),
                      "xorps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86fxor FR32:$src1, FR32:$src2))]>;
}

def FsANDPSrm : PSI<0x54, MRMSrcMem, (outs FR32:$dst),
                    (ins FR32:$src1, f128mem:$src2),
                    "andps\t{$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86fand FR32:$src1,
                                      (memopfsf32 addr:$src2)))]>;
def FsORPSrm  : PSI<0x56, MRMSrcMem, (outs FR32:$dst),
                    (ins FR32:$src1, f128mem:$src2),
                    "orps\t{$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86for FR32:$src1,
                                      (memopfsf32 addr:$src2)))]>;
def FsXORPSrm : PSI<0x57, MRMSrcMem, (outs FR32:$dst),
                    (ins FR32:$src1, f128mem:$src2),
                    "xorps\t{$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86fxor FR32:$src1,
                                      (memopfsf32 addr:$src2)))]>;

let neverHasSideEffects = 1 in {
def FsANDNPSrr : PSI<0x55, MRMSrcReg,
                     (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}", []>;
def FsANDNPSrm : PSI<0x55, MRMSrcMem,
                     (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}", []>;
}
}

/// basic_sse1_fp_binop_rm - SSE1 binops come in both scalar and vector forms.
///
/// In addition, we have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements undefined.
///
/// These three forms can each be reg+reg or reg+mem, so there are a total of
/// six "instructions".
///
let Constraints = "$src1 = $dst" in {
multiclass basic_sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
                                  SDNode OpNode, Intrinsic F32Int,
                                  bit Commutable = 0> {
  // Scalar operation, reg+reg.
  def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1,
                                        sse_load_f32:$src2))]>;
}
}

// Arithmetic instructions
defm ADD : basic_sse1_fp_binop_rm<0x58, "add", fadd, int_x86_sse_add_ss, 1>;
defm MUL : basic_sse1_fp_binop_rm<0x59, "mul", fmul, int_x86_sse_mul_ss, 1>;
defm SUB : basic_sse1_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse_sub_ss>;
defm DIV : basic_sse1_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse_div_ss>;
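
// Each defm above concatenates its prefix onto the def names inside the
// multiclass, so "defm ADD" expands to the six instructions ADDSSrr, ADDSSrm,
// ADDPSrr, ADDPSrm, ADDSSrr_Int, and ADDSSrm_Int. For instance, ADDPSrm is
// the packed reg+mem form matching
//   (set VR128:$dst, (fadd VR128:$src1, (memopv4f32 addr:$src2)))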

/// sse1_fp_binop_rm - Other SSE1 binops
///
/// This multiclass is like basic_sse1_fp_binop_rm, with the addition of
/// instructions for a full-vector intrinsic form. Operations that map
/// onto C operators don't use this form since they just use the plain
/// vector form instead of having a separate vector intrinsic form.
///
/// This provides a total of eight "instructions".
///
let Constraints = "$src1 = $dst" in {
multiclass sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
                            SDNode OpNode,
                            Intrinsic F32Int,
                            Intrinsic V4F32Int,
                            bit Commutable = 0> {

  // Scalar operation, reg+reg.
  def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1,
                                        sse_load_f32:$src2))]>;

  // Vector intrinsic operation, reg+reg.
  def PSrr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V4F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, reg+mem.
  def PSrm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V4F32Int VR128:$src1, (load addr:$src2)))]>;
}
}

defm MAX : sse1_fp_binop_rm<0x5F, "max", X86fmax,
                            int_x86_sse_max_ss, int_x86_sse_max_ps>;
defm MIN : sse1_fp_binop_rm<0x5D, "min", X86fmin,
                            int_x86_sse_min_ss, int_x86_sse_min_ps>;
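
// MAX and MIN therefore get the two extra packed-intrinsic forms on top of
// the six from the basic multiclass: e.g. MAXPSrr_Int matches
// int_x86_sse_max_ps directly, which matters because SSE min/max don't map
// onto a plain C operator the way fadd/fmul do.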

//===----------------------------------------------------------------------===//
// SSE packed FP Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
let neverHasSideEffects = 1 in
def MOVAPSrr : PSI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
def MOVAPSrm : PSI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (alignedloadv4f32 addr:$src))]>;

def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;

let neverHasSideEffects = 1 in
def MOVUPSrr : PSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1 in
def MOVUPSrm : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (loadv4f32 addr:$src))]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;

// Intrinsic forms of MOVUPS load and store
let isSimpleLoad = 1 in
def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;

let Constraints = "$src1 = $dst" in {
let AddedComplexity = 20 in {
  def MOVLPSrm : PSI<0x12, MRMSrcMem,
                     (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                     "movlps\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v4f32 (vector_shuffle VR128:$src1,
                        (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
                               MOVLP_shuffle_mask)))]>;
  def MOVHPSrm : PSI<0x16, MRMSrcMem,
                     (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                     "movhps\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v4f32 (vector_shuffle VR128:$src1,
                        (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
                               MOVHP_shuffle_mask)))]>;
} // AddedComplexity
} // Constraints = "$src1 = $dst"

def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (vector_shuffle
                                         (bc_v2f64 (v4f32 VR128:$src)), (undef),
                                         UNPCKH_shuffle_mask)), (iPTR 0))),
                           addr:$dst)]>;

let Constraints = "$src1 = $dst" in {
let AddedComplexity = 15 in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                                MOVHP_shuffle_mask)))]>;

  def MOVHLPSrr : PSI<0x12, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                                MOVHLPS_shuffle_mask)))]>;
} // AddedComplexity
} // Constraints = "$src1 = $dst"

/// sse1_fp_unop_rm - SSE1 unops come in both scalar and vector forms.
///
/// In addition, we have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.
///
/// These four forms can each have a reg or a mem operand, so there are a
/// total of eight "instructions".
///
multiclass sse1_fp_unop_rm<bits<8> opc, string OpcodeStr,
                           SDNode OpNode,
                           Intrinsic F32Int,
                           Intrinsic V4F32Int,
                           bit Commutable = 0> {
  // Scalar operation, reg.
  def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode FR32:$src))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, mem.
  def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode (load addr:$src)))]>;

  // Vector operation, reg.
  def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, mem.
  def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;

  // Intrinsic operation, reg.
  def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, mem.
  def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;

  // Vector intrinsic operation, reg.
  def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, mem.
  def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int (load addr:$src)))]>;
}

// Square root.
defm SQRT  : sse1_fp_unop_rm<0x51, "sqrt",  fsqrt,
                             int_x86_sse_sqrt_ss, int_x86_sse_sqrt_ps>;

// Reciprocal approximations. Note that these typically require refinement
// in order to obtain suitable precision.
defm RSQRT : sse1_fp_unop_rm<0x52, "rsqrt", X86frsqrt,
                             int_x86_sse_rsqrt_ss, int_x86_sse_rsqrt_ps>;
defm RCP   : sse1_fp_unop_rm<0x53, "rcp",   X86frcp,
                             int_x86_sse_rcp_ss, int_x86_sse_rcp_ps>;
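
// A standard way to do that refinement (a sketch, not emitted by this file):
// one Newton-Raphson step on the rcpps estimate x0 of 1/a computes
//   x1 = x0 * (2 - a * x0)
// roughly doubling the ~12 bits of precision the hardware approximation
// provides.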

// Logical
let Constraints = "$src1 = $dst" in {
  let isCommutable = 1 in {
    def ANDPSrr : PSI<0x54, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "andps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (v2i64
                                         (and VR128:$src1, VR128:$src2)))]>;
    def ORPSrr  : PSI<0x56, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "orps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (v2i64
                                         (or VR128:$src1, VR128:$src2)))]>;
    def XORPSrr : PSI<0x57, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "xorps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (v2i64
                                         (xor VR128:$src1, VR128:$src2)))]>;
  }

  def ANDPSrm : PSI<0x54, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "andps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (and (bc_v2i64 (v4f32 VR128:$src1)),
                                           (memopv2i64 addr:$src2)))]>;
  def ORPSrm  : PSI<0x56, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "orps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (or (bc_v2i64 (v4f32 VR128:$src1)),
                                          (memopv2i64 addr:$src2)))]>;
  def XORPSrm : PSI<0x57, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "xorps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (xor (bc_v2i64 (v4f32 VR128:$src1)),
                                           (memopv2i64 addr:$src2)))]>;
  def ANDNPSrr : PSI<0x55, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v2i64 (and (xor VR128:$src1,
                                        (bc_v2i64 (v4i32 immAllOnesV))),
                                   VR128:$src2)))]>;
  def ANDNPSrm : PSI<0x55, MRMSrcMem,
                     (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
                                        (bc_v2i64 (v4i32 immAllOnesV))),
                                   (memopv2i64 addr:$src2))))]>;
}

let Constraints = "$src1 = $dst" in {
  def CMPPSrri : PSIi8<0xC2, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                       "cmp${cc}ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
                                          VR128:$src, imm:$cc))]>;
  def CMPPSrmi : PSIi8<0xC2, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
                       "cmp${cc}ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
                                          (load addr:$src), imm:$cc))]>;
}

// Shuffle and unpack instructions
let Constraints = "$src1 = $dst" in {
  let isConvertibleToThreeAddress = 1 in // Convert to pshufd
    def SHUFPSrri : PSIi8<0xC6, MRMSrcReg,
                          (outs VR128:$dst), (ins VR128:$src1,
                           VR128:$src2, i32i8imm:$src3),
                          "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                          [(set VR128:$dst,
                            (v4f32 (vector_shuffle
                                    VR128:$src1, VR128:$src2,
                                    SHUFP_shuffle_mask:$src3)))]>;
  def SHUFPSrmi : PSIi8<0xC6, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1,
                         f128mem:$src2, i32i8imm:$src3),
                        "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst,
                          (v4f32 (vector_shuffle
                                  VR128:$src1, (memopv4f32 addr:$src2),
                                  SHUFP_shuffle_mask:$src3)))]>;

  let AddedComplexity = 10 in {
    def UNPCKHPSrr : PSI<0x15, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "unpckhps\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v4f32 (vector_shuffle
                                   VR128:$src1, VR128:$src2,
                                   UNPCKH_shuffle_mask)))]>;
    def UNPCKHPSrm : PSI<0x15, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                         "unpckhps\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v4f32 (vector_shuffle
                                   VR128:$src1, (memopv4f32 addr:$src2),
                                   UNPCKH_shuffle_mask)))]>;

    def UNPCKLPSrr : PSI<0x14, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "unpcklps\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v4f32 (vector_shuffle
                                   VR128:$src1, VR128:$src2,
                                   UNPCKL_shuffle_mask)))]>;
    def UNPCKLPSrm : PSI<0x14, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                         "unpcklps\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v4f32 (vector_shuffle
                                   VR128:$src1, (memopv4f32 addr:$src2),
                                   UNPCKL_shuffle_mask)))]>;
  } // AddedComplexity
} // Constraints = "$src1 = $dst"

// Mask creation instructions
def MOVMSKPSrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                     "movmskps\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (int_x86_sse_movmsk_ps VR128:$src))]>;
def MOVMSKPDrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                     "movmskpd\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (int_x86_sse2_movmsk_pd VR128:$src))]>;

// Prefetch intrinsic.
def PREFETCHT0  : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
                      "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
def PREFETCHT1  : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
                      "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
def PREFETCHT2  : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
                      "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
                      "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;

// Non-temporal stores
def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                    "movntps\t{$src, $dst|$dst, $src}",
                    [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;

// Load, store, and memory fence
def SFENCE : PSI<0xAE, MRM7m, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>;

def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
                  "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
                  "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;

// Alias instructions that map zero vector to pxor / xorp* for sse.
let isReMaterializable = 1 in
def V_SET0 : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins),
                 "xorps\t$dst, $dst",
                 [(set VR128:$dst, (v4i32 immAllZerosV))]>;

let Predicates = [HasSSE1] in {
  def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
  def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
  def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
  def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
  def : Pat<(v4f32 immAllZerosV), (V_SET0)>;
}
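
// The extra Pats work because the all-zeros bit pattern is identical at
// every vector type, so one xorps-based def can stand in for all of them;
// isReMaterializable then lets the register allocator re-emit the xorps
// instead of spilling and reloading a zero.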

// FR32 to 128-bit vector conversion.
def MOVSS2PSrr : SSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR32:$src),
                     "movss\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v4f32 (scalar_to_vector FR32:$src)))]>;
def MOVSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
                     "movss\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v4f32 (scalar_to_vector (loadf32 addr:$src))))]>;

// FIXME: may not be able to eliminate this movss with coalescing if the src
// and dest register classes are different. We really want to write this
// pattern like this:
// def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
//           (f32 FR32:$src)>;
def MOVPS2SSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins VR128:$src),
                     "movss\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (vector_extract (v4f32 VR128:$src),
                                       (iPTR 0)))]>;
def MOVPS2SSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
                     "movss\t{$src, $dst|$dst, $src}",
                     [(store (f32 (vector_extract (v4f32 VR128:$src),
                                   (iPTR 0))), addr:$dst)]>;

// Move to lower bits of a VR128, leaving upper bits alone.
// Three operand (but two address) aliases.
let Constraints = "$src1 = $dst" in {
  let neverHasSideEffects = 1 in
  def MOVLSS2PSrr : SSI<0x10, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, FR32:$src2),
                        "movss\t{$src2, $dst|$dst, $src2}", []>;

  let AddedComplexity = 15 in
  def MOVLPSrr : SSI<0x10, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "movss\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                               MOVL_shuffle_mask)))]>;
}

// Move to lower bits of a VR128, zeroing the upper bits.
// Loading from memory automatically zeroes the upper bits.
let AddedComplexity = 20 in
def MOVZSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
                      "movss\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (X86zvmovl (v4f32 (scalar_to_vector
                                                 (loadf32 addr:$src))))))]>;

def : Pat<(v4f32 (X86zvmovl (memopv4f32 addr:$src))),
          (MOVZSS2PSrm addr:$src)>;

//===----------------------------------------------------------------------===//
// SSE2 Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
let neverHasSideEffects = 1 in
def MOVSDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
def MOVSDrm : SDI<0x10, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(set FR64:$dst, (loadf64 addr:$src))]>;
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>;

// Conversion instructions
def CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src),
                      "cvttsd2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint FR64:$src))]>;
def CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f64mem:$src),
                      "cvttsd2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
def CVTSD2SSrr  : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround FR64:$src))]>;
def CVTSD2SSrm  : SDI<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround (loadf64 addr:$src)))]>;
def CVTSI2SDrr  : SDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR32:$src),
                      "cvtsi2sd\t{$src, $dst|$dst, $src}",
                      [(set FR64:$dst, (sint_to_fp GR32:$src))]>;
def CVTSI2SDrm  : SDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i32mem:$src),
                      "cvtsi2sd\t{$src, $dst|$dst, $src}",
                      [(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>;

// SSE2 instructions with XS prefix
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (fextend FR32:$src))]>, XS,
                 Requires<[HasSSE2]>;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
                 Requires<[HasSSE2]>;

// Match intrinsics which expect XMM operand(s).
def Int_CVTSD2SIrr : SDI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                         "cvtsd2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse2_cvtsd2si VR128:$src))]>;
def Int_CVTSD2SIrm : SDI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
                         "cvtsd2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse2_cvtsd2si
                                           (load addr:$src)))]>;

// Match intrinsics which expect MM and XMM operand(s).
def Int_CVTPD2PIrr : PDI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvtpd2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtpd2pi VR128:$src))]>;
def Int_CVTPD2PIrm : PDI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
                         "cvtpd2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtpd2pi
                                           (load addr:$src)))]>;
def Int_CVTTPD2PIrr: PDI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvttpd2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttpd2pi VR128:$src))]>;
def Int_CVTTPD2PIrm: PDI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
                         "cvttpd2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttpd2pi
                                           (load addr:$src)))]>;
def Int_CVTPI2PDrr : PDI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
                         "cvtpi2pd\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse_cvtpi2pd VR64:$src))]>;
def Int_CVTPI2PDrm : PDI<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                         "cvtpi2pd\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse_cvtpi2pd
                                            (load addr:$src)))]>;

// Aliases for intrinsics
def Int_CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                          "cvttsd2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse2_cvttsd2si VR128:$src))]>;
def Int_CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
                          "cvttsd2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst, (int_x86_sse2_cvttsd2si
                                            (load addr:$src)))]>;

// Comparison instructions
let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
  def CMPSDrr : SDIi8<0xC2, MRMSrcReg,
                      (outs FR64:$dst), (ins FR64:$src1, FR64:$src, SSECC:$cc),
                      "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;

  def CMPSDrm : SDIi8<0xC2, MRMSrcMem,
                      (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, SSECC:$cc),
                      "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
}

let Defs = [EFLAGS] in {
def UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2),
                   "ucomisd\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR64:$src1, FR64:$src2), (implicit EFLAGS)]>;
def UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2),
                   "ucomisd\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR64:$src1, (loadf64 addr:$src2)),
                    (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Aliases to match intrinsics which expect XMM operand(s).
let Constraints = "$src1 = $dst" in {
  def Int_CMPSDrr : SDIi8<0xC2, MRMSrcReg,
                          (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                          "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
                                             VR128:$src, imm:$cc))]>;
  def Int_CMPSDrm : SDIi8<0xC2, MRMSrcMem,
                          (outs VR128:$dst), (ins VR128:$src1, f64mem:$src, SSECC:$cc),
                          "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
                                             (load addr:$src), imm:$cc))]>;
}

let Defs = [EFLAGS] in {
def Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                       "ucomisd\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
                        (implicit EFLAGS)]>;
def Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                       "ucomisd\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v2f64 VR128:$src1), (load addr:$src2)),
                        (implicit EFLAGS)]>;

def Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                      "comisd\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
                       (implicit EFLAGS)]>;
def Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                      "comisd\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v2f64 VR128:$src1), (load addr:$src2)),
                       (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Aliases of packed SSE2 instructions for scalar use. These all have names
// that start with 'Fs'.

// Alias instructions that map fld0 to pxor for sse.
let isReMaterializable = 1 in
def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins),
                 "pxor\t$dst, $dst", [(set FR64:$dst, fpimm0)]>,
               Requires<[HasSSE2]>, TB, OpSize;

// Alias instruction to do FR64 reg-to-reg copy using movapd. Upper bits are
// disregarded.
let neverHasSideEffects = 1 in
def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                     "movapd\t{$src, $dst|$dst, $src}", []>;

// Alias instruction to load FR64 from f128mem using movapd. Upper bits are
// disregarded.
let isSimpleLoad = 1 in
def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;

// Alias bitwise logical operations using SSE logical ops on packed FP values.
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in {
  def FsANDPDrr : PDI<0x54, MRMSrcReg, (outs FR64:$dst),
                      (ins FR64:$src1, FR64:$src2),
                      "andpd\t{$src2, $dst|$dst, $src2}",
                      [(set FR64:$dst, (X86fand FR64:$src1, FR64:$src2))]>;
  def FsORPDrr  : PDI<0x56, MRMSrcReg, (outs FR64:$dst),
                      (ins FR64:$src1, FR64:$src2),
                      "orpd\t{$src2, $dst|$dst, $src2}",
                      [(set FR64:$dst, (X86for FR64:$src1, FR64:$src2))]>;
  def FsXORPDrr : PDI<0x57, MRMSrcReg, (outs FR64:$dst),
                      (ins FR64:$src1, FR64:$src2),
                      "xorpd\t{$src2, $dst|$dst, $src2}",
                      [(set FR64:$dst, (X86fxor FR64:$src1, FR64:$src2))]>;
}

def FsANDPDrm : PDI<0x54, MRMSrcMem, (outs FR64:$dst),
                    (ins FR64:$src1, f128mem:$src2),
                    "andpd\t{$src2, $dst|$dst, $src2}",
                    [(set FR64:$dst, (X86fand FR64:$src1,
                                      (memopfsf64 addr:$src2)))]>;
def FsORPDrm  : PDI<0x56, MRMSrcMem, (outs FR64:$dst),
                    (ins FR64:$src1, f128mem:$src2),
                    "orpd\t{$src2, $dst|$dst, $src2}",
                    [(set FR64:$dst, (X86for FR64:$src1,
                                      (memopfsf64 addr:$src2)))]>;
def FsXORPDrm : PDI<0x57, MRMSrcMem, (outs FR64:$dst),
                    (ins FR64:$src1, f128mem:$src2),
                    "xorpd\t{$src2, $dst|$dst, $src2}",
                    [(set FR64:$dst, (X86fxor FR64:$src1,
                                      (memopfsf64 addr:$src2)))]>;

let neverHasSideEffects = 1 in {
def FsANDNPDrr : PDI<0x55, MRMSrcReg,
                     (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                     "andnpd\t{$src2, $dst|$dst, $src2}", []>;

def FsANDNPDrm : PDI<0x55, MRMSrcMem,
                     (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2),
                     "andnpd\t{$src2, $dst|$dst, $src2}", []>;
}
}

/// basic_sse2_fp_binop_rm - SSE2 binops come in both scalar and vector forms.
///
/// In addition, we have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements undefined.
///
/// These three forms can each be reg+reg or reg+mem, so there are a total of
/// six "instructions".
///
let Constraints = "$src1 = $dst" in {
multiclass basic_sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
                                  SDNode OpNode, Intrinsic F64Int,
                                  bit Commutable = 0> {
  // Scalar operation, reg+reg.
  def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2),
                 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
                     !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F64Int VR128:$src1,
                                        sse_load_f64:$src2))]>;
}
}

// Arithmetic instructions
defm ADD : basic_sse2_fp_binop_rm<0x58, "add", fadd, int_x86_sse2_add_sd, 1>;
defm MUL : basic_sse2_fp_binop_rm<0x59, "mul", fmul, int_x86_sse2_mul_sd, 1>;
defm SUB : basic_sse2_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse2_sub_sd>;
defm DIV : basic_sse2_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse2_div_sd>;
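
// Note the defm prefixes ADD/MUL/SUB/DIV are deliberately reused from the
// SSE1 section: the SSE1 multiclass emits ...SS*/...PS* names while this one
// emits ...SD*/...PD* names (e.g. ADDSDrr vs. ADDSSrr), so the expanded
// instruction names never collide.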

/// sse2_fp_binop_rm - Other SSE2 binops
///
/// This multiclass is like basic_sse2_fp_binop_rm, with the addition of
/// instructions for a full-vector intrinsic form. Operations that map
/// onto C operators don't use this form since they just use the plain
/// vector form instead of having a separate vector intrinsic form.
///
/// This provides a total of eight "instructions".
///
let Constraints = "$src1 = $dst" in {
multiclass sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
                            SDNode OpNode,
                            Intrinsic F64Int,
                            Intrinsic V2F64Int,
                            bit Commutable = 0> {

  // Scalar operation, reg+reg.
  def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2),
                 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
                     !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F64Int VR128:$src1,
                                        sse_load_f64:$src2))]>;

  // Vector intrinsic operation, reg+reg.
  def PDrr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V2F64Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, reg+mem.
  def PDrm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V2F64Int VR128:$src1, (load addr:$src2)))]>;
}
}

defm MAX : sse2_fp_binop_rm<0x5F, "max", X86fmax,
                            int_x86_sse2_max_sd, int_x86_sse2_max_pd>;
defm MIN : sse2_fp_binop_rm<0x5D, "min", X86fmin,
                            int_x86_sse2_min_sd, int_x86_sse2_min_pd>;

//===----------------------------------------------------------------------===//
// SSE packed FP Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
let neverHasSideEffects = 1 in
def MOVAPDrr : PDI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
def MOVAPDrm : PDI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (alignedloadv2f64 addr:$src))]>;

def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;

let neverHasSideEffects = 1 in
def MOVUPDrr : PDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1 in
def MOVUPDrm : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (loadv2f64 addr:$src))]>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>;

// Intrinsic forms of MOVUPD load and store
def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;

let Constraints = "$src1 = $dst" in {
let AddedComplexity = 20 in {
  def MOVLPDrm : PDI<0x12, MRMSrcMem,
                     (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                     "movlpd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v2f64 (vector_shuffle VR128:$src1,
                               (scalar_to_vector (loadf64 addr:$src2)),
                               MOVLP_shuffle_mask)))]>;
  def MOVHPDrm : PDI<0x16, MRMSrcMem,
                     (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                     "movhpd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v2f64 (vector_shuffle VR128:$src1,
                               (scalar_to_vector (loadf64 addr:$src2)),
                               MOVHP_shuffle_mask)))]>;
} // AddedComplexity
} // Constraints = "$src1 = $dst"

def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>;

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (vector_shuffle VR128:$src, (undef),
                                         UNPCKH_shuffle_mask)), (iPTR 0))),
                           addr:$dst)]>;
// SSE2 instructions without OpSize prefix
def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                          (bitconvert (memopv2i64 addr:$src))))]>,
                     TB, Requires<[HasSSE2]>;

// SSE2 instructions with XS prefix
def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                     XS, Requires<[HasSSE2]>;
def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                          (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, Requires<[HasSSE2]>;

def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                            (load addr:$src)))]>;
// SSE2 packed instructions with XS prefix
def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))]>,
                      XS, Requires<[HasSSE2]>;
def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttps2dq
                                           (load addr:$src)))]>,
                      XS, Requires<[HasSSE2]>;

// SSE2 packed instructions with XD prefix
def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                     XD, Requires<[HasSSE2]>;
def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (load addr:$src)))]>,
                     XD, Requires<[HasSSE2]>;

def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                             (load addr:$src)))]>;
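
// Note: the 'tt' variants (cvttps2dq, cvttpd2dq) always truncate toward zero,
// matching C-style float-to-int casts, while the plain cvtps2dq/cvtpd2dq
// forms round according to the current MXCSR rounding mode.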
// SSE2 instructions without OpSize prefix
def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                     TB, Requires<[HasSSE2]>;

def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                            (load addr:$src)))]>;
// Match intrinsics which expect XMM operand(s).
// Aliases for intrinsics
let Constraints = "$src1 = $dst" in {
def Int_CVTSI2SDrr: SDI<0x2A, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
                        "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
                                           GR32:$src2))]>;
def Int_CVTSI2SDrm: SDI<0x2A, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
                        "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
                                           (loadi32 addr:$src2)))]>;
def Int_CVTSD2SSrr: SDI<0x5A, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
                                           VR128:$src2))]>;
def Int_CVTSD2SSrm: SDI<0x5A, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                        "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
                                           (load addr:$src2)))]>;
def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                         VR128:$src2))]>, XS,
                    Requires<[HasSSE2]>;
def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                      "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                         (load addr:$src2)))]>, XS,
                    Requires<[HasSSE2]>;
} // Constraints = "$src1 = $dst"
/// sse2_fp_unop_rm - SSE2 unops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.
///
/// These four forms can each have a reg or a mem operand, so there are a
/// total of eight "instructions".
///
multiclass sse2_fp_unop_rm<bits<8> opc, string OpcodeStr,
                           SDNode OpNode, Intrinsic F64Int,
                           Intrinsic V2F64Int,
                           bit Commutable = 0> {
  // Scalar operation, reg.
  def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                [(set FR64:$dst, (OpNode FR64:$src))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, mem.
  def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
                !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                [(set FR64:$dst, (OpNode (load addr:$src)))]>;

  // Vector operation, reg.
  def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, mem.
  def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;

  // Intrinsic operation, reg.
  def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F64Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, mem.
  def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
                    !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;

  // Vector intrinsic operation, reg
  def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V2F64Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, mem
  def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V2F64Int (load addr:$src)))]>;
}

// Square root.
defm SQRT : sse2_fp_unop_rm<0x51, "sqrt", fsqrt,
                            int_x86_sse2_sqrt_sd, int_x86_sse2_sqrt_pd>;
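
// For reference, the defm above expands (by multiclass name concatenation)
// into the eight instructions the comment promises: SQRTSDr, SQRTSDm,
// SQRTPDr, SQRTPDm, SQRTSDr_Int, SQRTSDm_Int, SQRTPDr_Int and SQRTPDm_Int.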
// There is no f64 version of the reciprocal approximation instructions.

// Logical
let Constraints = "$src1 = $dst" in {
  let isCommutable = 1 in {
    def ANDPDrr : PDI<0x54, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "andpd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (and (bc_v2i64 (v2f64 VR128:$src1)),
                             (bc_v2i64 (v2f64 VR128:$src2))))]>;
    def ORPDrr : PDI<0x56, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "orpd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (or (bc_v2i64 (v2f64 VR128:$src1)),
                           (bc_v2i64 (v2f64 VR128:$src2))))]>;
    def XORPDrr : PDI<0x57, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "xorpd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (xor (bc_v2i64 (v2f64 VR128:$src1)),
                             (bc_v2i64 (v2f64 VR128:$src2))))]>;
  }

  def ANDPDrm : PDI<0x54, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "andpd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (and (bc_v2i64 (v2f64 VR128:$src1)),
                           (memopv2i64 addr:$src2)))]>;
  def ORPDrm : PDI<0x56, MRMSrcMem,
                   (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                   "orpd\t{$src2, $dst|$dst, $src2}",
                   [(set VR128:$dst,
                     (or (bc_v2i64 (v2f64 VR128:$src1)),
                         (memopv2i64 addr:$src2)))]>;
  def XORPDrm : PDI<0x57, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "xorpd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (xor (bc_v2i64 (v2f64 VR128:$src1)),
                           (memopv2i64 addr:$src2)))]>;
  def ANDNPDrr : PDI<0x55, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "andnpd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
                            (bc_v2i64 (v2f64 VR128:$src2))))]>;
  def ANDNPDrm : PDI<0x55, MRMSrcMem,
                     (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     "andnpd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
                            (memopv2i64 addr:$src2)))]>;
}
let Constraints = "$src1 = $dst" in {
  def CMPPDrri : PDIi8<0xC2, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                       "cmp${cc}pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
                                          VR128:$src, imm:$cc))]>;
  def CMPPDrmi : PDIi8<0xC2, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
                       "cmp${cc}pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
                                          (load addr:$src), imm:$cc))]>;
}

// Shuffle and unpack instructions
let Constraints = "$src1 = $dst" in {
  def SHUFPDrri : PDIi8<0xC6, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i8imm:$src3),
                        "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst, (v2f64 (vector_shuffle
                                                  VR128:$src1, VR128:$src2,
                                                  SHUFP_shuffle_mask:$src3)))]>;
  def SHUFPDrmi : PDIi8<0xC6, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1,
                                            f128mem:$src2, i8imm:$src3),
                        "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst,
                          (v2f64 (vector_shuffle
                                  VR128:$src1, (memopv2f64 addr:$src2),
                                  SHUFP_shuffle_mask:$src3)))]>;

  let AddedComplexity = 10 in {
    def UNPCKHPDrr : PDI<0x15, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "unpckhpd\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2f64 (vector_shuffle
                                   VR128:$src1, VR128:$src2,
                                   UNPCKH_shuffle_mask)))]>;
    def UNPCKHPDrm : PDI<0x15, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                         "unpckhpd\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2f64 (vector_shuffle
                                   VR128:$src1, (memopv2f64 addr:$src2),
                                   UNPCKH_shuffle_mask)))]>;

    def UNPCKLPDrr : PDI<0x14, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "unpcklpd\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2f64 (vector_shuffle
                                   VR128:$src1, VR128:$src2,
                                   UNPCKL_shuffle_mask)))]>;
    def UNPCKLPDrm : PDI<0x14, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                         "unpcklpd\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2f64 (vector_shuffle
                                   VR128:$src1, (memopv2f64 addr:$src2),
                                   UNPCKL_shuffle_mask)))]>;
  } // AddedComplexity
} // Constraints = "$src1 = $dst"
//===----------------------------------------------------------------------===//
// SSE integer instructions
//===----------------------------------------------------------------------===//

// Move Instructions
let neverHasSideEffects = 1 in
def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1, mayLoad = 1 in
def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;

def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
let isSimpleLoad = 1, mayLoad = 1 in
def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                 "movdqu\t{$src, $dst|$dst, $src}",
                 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
               XS, Requires<[HasSSE2]>;

def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                 "movdqu\t{$src, $dst|$dst, $src}",
                 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
               XS, Requires<[HasSSE2]>;

// Intrinsic forms of MOVDQU load and store
let isSimpleLoad = 1 in
def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                     "movdqu\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
                   XS, Requires<[HasSSE2]>;
def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                     "movdqu\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
                   XS, Requires<[HasSSE2]>;
let Constraints = "$src1 = $dst" in {

multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
                            bit Commutable = 0> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1,
                                  (bitconvert (memopv2i64 addr:$src2))))]>;
}

multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
                             string OpcodeStr,
                             Intrinsic IntId, Intrinsic IntId2> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1,
                                  (bitconvert (memopv2i64 addr:$src2))))]>;
  def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
}

/// PDI_binop_rm - Simple SSE2 binary operator.
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                        ValueType OpVT, bit Commutable = 0> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
                                        (bitconvert (memopv2i64 addr:$src2)))))]>;
}

/// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
///
/// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
/// to collapse (bitconvert VT to VT) into its operand.
///
multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
                              bit Commutable = 0> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
}

} // Constraints = "$src1 = $dst"
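
// As a usage sketch: each defm below stamps out one of the multiclasses
// above, so for example
//   defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
// produces a register-register PADDBrr and a register-memory PADDBrm, with
// the final bit marking the operation commutable.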
// 128-bit Integer Arithmetic

defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;

defm PADDSB  : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
defm PADDSW  : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;

defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;

defm PSUBSB  : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
defm PSUBSW  : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;

defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;

defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
defm PMULHW  : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w , 1>;
defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;

defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;

defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;

defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
                               int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
                               int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
                               int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;

defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
                               int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
                               int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
                               int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;

defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
                               int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
                               int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;

// 128-bit logical shifts.
let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
  def PSLLDQri : PDIi8<0x73, MRM7r,
                       (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                       "pslldq\t{$src2, $dst|$dst, $src2}", []>;
  def PSRLDQri : PDIi8<0x73, MRM3r,
                       (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                       "psrldq\t{$src2, $dst|$dst, $src2}", []>;
  // PSRADQri doesn't exist in SSE[1-3].
}

let Predicates = [HasSSE2] in {
  def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
            (v2i64 (PSLLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
            (v2i64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
  def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
            (v2f64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
}
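
// Note on PSxLDQ_imm: the psll_dq/psrl_dq intrinsics express their shift
// amount in bits, while pslldq/psrldq shift by whole bytes, so the patterns
// above run the immediate through an SDNodeXForm that divides it by eight.
// Roughly (as defined elsewhere in the X86 .td files):
//
//   def PSxLDQ_imm : SDNodeXForm<imm, [{
//     return getI32Imm(N->getValue() >> 3);  // bits -> bytes
//   }]>;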
defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
defm POR  : PDI_binop_rm_v2i64<0xEB, "por" , or , 1>;
defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;

let Constraints = "$src1 = $dst" in {
  def PANDNrr : PDI<0xDF, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "pandn\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
                                               VR128:$src2)))]>;

  def PANDNrm : PDI<0xDF, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                    "pandn\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
                                               (memopv2i64 addr:$src2))))]>;
}

// SSE2 Integer comparison
defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b>;
defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w>;
defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d>;
defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;

// Pack instructions
defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
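
// packsswb/packssdw narrow each element with signed saturation and packuswb
// with unsigned saturation; since there is no generic saturating-truncate
// node here, these map straight to the intrinsics.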
// Shuffle and unpack instructions
def PSHUFDri : PDIi8<0x70, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
                     "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set VR128:$dst, (v4i32 (vector_shuffle
                                               VR128:$src1, (undef),
                                               PSHUFD_shuffle_mask:$src2)))]>;
def PSHUFDmi : PDIi8<0x70, MRMSrcMem,
                     (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
                     "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set VR128:$dst, (v4i32 (vector_shuffle
                                               (bc_v4i32 (memopv2i64 addr:$src1)),
                                               (undef),
                                               PSHUFD_shuffle_mask:$src2)))]>;

// SSE2 with ImmT == Imm8 and XS prefix.
def PSHUFHWri : Ii8<0x70, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
                    "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v8i16 (vector_shuffle
                                              VR128:$src1, (undef),
                                              PSHUFHW_shuffle_mask:$src2)))]>,
                  XS, Requires<[HasSSE2]>;
def PSHUFHWmi : Ii8<0x70, MRMSrcMem,
                    (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
                    "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v8i16 (vector_shuffle
                                              (bc_v8i16 (memopv2i64 addr:$src1)),
                                              (undef),
                                              PSHUFHW_shuffle_mask:$src2)))]>,
                  XS, Requires<[HasSSE2]>;

// SSE2 with ImmT == Imm8 and XD prefix.
def PSHUFLWri : Ii8<0x70, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v8i16 (vector_shuffle
                                              VR128:$src1, (undef),
                                              PSHUFLW_shuffle_mask:$src2)))]>,
                  XD, Requires<[HasSSE2]>;
def PSHUFLWmi : Ii8<0x70, MRMSrcMem,
                    (outs VR128:$dst), (ins i128mem:$src1, i32i8imm:$src2),
                    "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v8i16 (vector_shuffle
                                              (bc_v8i16 (memopv2i64 addr:$src1)),
                                              (undef),
                                              PSHUFLW_shuffle_mask:$src2)))]>,
                  XD, Requires<[HasSSE2]>;
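
// All three pshuf* forms select source elements with a two-bit field per
// result element in the imm8: pshufd permutes the four dwords, while
// pshufhw/pshuflw permute only the high/low four words and copy the other
// half of the vector through unchanged.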
let Constraints = "$src1 = $dst" in {
  def PUNPCKLBWrr : PDI<0x60, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpcklbw\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLBWrm : PDI<0x60, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpcklbw\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v16i8 (vector_shuffle VR128:$src1,
                                  (bc_v16i8 (memopv2i64 addr:$src2)),
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLWDrr : PDI<0x61, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpcklwd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLWDrm : PDI<0x61, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpcklwd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v8i16 (vector_shuffle VR128:$src1,
                                  (bc_v8i16 (memopv2i64 addr:$src2)),
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLDQrr : PDI<0x62, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpckldq\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLDQrm : PDI<0x62, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpckldq\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v4i32 (vector_shuffle VR128:$src1,
                                  (bc_v4i32 (memopv2i64 addr:$src2)),
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
                                   UNPCKL_shuffle_mask)))]>;
  def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                         "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (vector_shuffle VR128:$src1,
                                   (memopv2i64 addr:$src2),
                                   UNPCKL_shuffle_mask)))]>;

  def PUNPCKHBWrr : PDI<0x68, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpckhbw\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHBWrm : PDI<0x68, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpckhbw\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v16i8 (vector_shuffle VR128:$src1,
                                  (bc_v16i8 (memopv2i64 addr:$src2)),
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHWDrr : PDI<0x69, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpckhwd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHWDrm : PDI<0x69, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpckhwd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v8i16 (vector_shuffle VR128:$src1,
                                  (bc_v8i16 (memopv2i64 addr:$src2)),
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHDQrr : PDI<0x6A, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpckhdq\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHDQrm : PDI<0x6A, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpckhdq\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v4i32 (vector_shuffle VR128:$src1,
                                  (bc_v4i32 (memopv2i64 addr:$src2)),
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
                                   UNPCKH_shuffle_mask)))]>;
  def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                         "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (vector_shuffle VR128:$src1,
                                   (memopv2i64 addr:$src2),
                                   UNPCKH_shuffle_mask)))]>;
} // Constraints = "$src1 = $dst"
// Extract / Insert
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
                     (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
                     "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
                                       (iPTR imm:$src2)))]>;
let Constraints = "$src1 = $dst" in {
  def PINSRWrri : PDIi8<0xC4, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1,
                                            GR32:$src2, i32i8imm:$src3),
                        "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst,
                          (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
  def PINSRWrmi : PDIi8<0xC4, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1,
                                            i16mem:$src2, i32i8imm:$src3),
                        "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst,
                          (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
                                     imm:$src3))]>;
}

// Mask creation
def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                     "pmovmskb\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;

// Conditional store
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
                     "maskmovdqu\t{$mask, $src|$src, $mask}",
                     [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
// Non-temporal stores
def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                    "movntpd\t{$src, $dst|$dst, $src}",
                    [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                    "movntdq\t{$src, $dst|$dst, $src}",
                    [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
                 "movnti\t{$src, $dst|$dst, $src}",
                 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
               TB, Requires<[HasSSE2]>;

// Flush cache
def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
                "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
              TB, Requires<[HasSSE2]>;
// Load, store, and memory fence
def LFENCE : I<0xAE, MRM5m, (outs), (ins),
               "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
def MFENCE : I<0xAE, MRM6m, (outs), (ins),
               "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;

//TODO: custom lower this so as to never even generate the noop
def : Pat<(membarrier (i8 imm:$ll), (i8 imm:$ls), (i8 imm:$sl), (i8 imm:$ss),
                      (i8 0)), (NOOP)>;
def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
def : Pat<(membarrier (i8 imm:$ll), (i8 imm:$ls), (i8 imm:$sl), (i8 imm:$ss),
                      (i8 1)), (MFENCE)>;

// Alias instruction that maps an all-ones vector to pcmpeqd, analogous to
// the zero vector -> pxor / xorp* aliases.
let isReMaterializable = 1 in
def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins),
                       "pcmpeqd\t$dst, $dst",
                       [(set VR128:$dst, (v4i32 immAllOnesV))]>;
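
// pcmpeqd with both operands the same register compares true in every lane,
// so it materializes 0xFFFFFFFF x 4 without touching memory; marking it
// isReMaterializable lets the register allocator rebuild the value instead
// of spilling it.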
// FR64 to 128-bit vector conversion.
def MOVSD2PDrr : SDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR64:$src),
                     "movsd\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2f64 (scalar_to_vector FR64:$src)))]>;
def MOVSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                     "movsd\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2f64 (scalar_to_vector (loadf64 addr:$src))))]>;

def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))]>;
def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;

def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (bitconvert GR32:$src))]>;

def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;

// SSE2 instructions with XS prefix
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                    "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst,
                      (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                  Requires<[HasSSE2]>;
def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}",
                      [(store (i64 (vector_extract (v2i64 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>;

// FIXME: may not be able to eliminate this movss with coalescing, since the
// src and dest register classes are different. We really want to write this
// pattern like this:
// def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
//           (f32 FR32:$src)>;
def MOVPD2SDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins VR128:$src),
                     "movsd\t{$src, $dst|$dst, $src}",
                     [(set FR64:$dst, (vector_extract (v2f64 VR128:$src),
                                       (iPTR 0)))]>;
def MOVPD2SDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movsd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract (v2f64 VR128:$src),
                                   (iPTR 0))), addr:$dst)]>;
def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                        (iPTR 0)))]>;
def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (vector_extract (v4i32 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>;

def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (bitconvert FR32:$src))]>;
def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
// Move to the lower bits of a VR128, leaving the upper bits alone.
// Three operand (but two address) aliases.
let Constraints = "$src1 = $dst" in {
  let neverHasSideEffects = 1 in
  def MOVLSD2PDrr : SDI<0x10, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, FR64:$src2),
                        "movsd\t{$src2, $dst|$dst, $src2}", []>;

  let AddedComplexity = 15 in
  def MOVLPDrr : SDI<0x10, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "movsd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
                               MOVL_shuffle_mask)))]>;
}

// Store / copy the lower 64 bits of an XMM register.
def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;

// Move to the lower bits of a VR128, zeroing the upper bits.
// Loading from memory automatically zeroes the upper bits.
let AddedComplexity = 20 in
def MOVZSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                      "movsd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v2f64 (X86zvmovl (v2f64 (scalar_to_vector
                                                  (loadf64 addr:$src))))))]>;

def : Pat<(v2f64 (X86zvmovl (memopv2f64 addr:$src))),
          (MOVZSD2PDrm addr:$src)>;
// movd / movq to XMM register zero-extends
let AddedComplexity = 15 in {
def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v4i32 (X86zvmovl
                                          (v4i32 (scalar_to_vector GR32:$src)))))]>;
// This is X86-64 only.
def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (v2i64 (X86zvmovl
                                           (v2i64 (scalar_to_vector GR64:$src)))))]>;
}

let AddedComplexity = 20 in {
def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (X86zvmovl (v4i32 (scalar_to_vector
                                                   (loadi32 addr:$src))))))]>;
def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (X86zvmovl (v2i64 (scalar_to_vector
                                                 (loadi64 addr:$src))))))]>, XS,
                   Requires<[HasSSE2]>;
}

// Move from XMM to XMM, clearing the upper 64 bits. Note: there is a bug in
// the IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
let AddedComplexity = 15 in
def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (v2i64 (X86zvmovl (v2i64 VR128:$src))))]>,
                      XS, Requires<[HasSSE2]>;

let AddedComplexity = 20 in
def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (v2i64 (X86zvmovl
                                           (memopv2i64 addr:$src))))]>,
                      XS, Requires<[HasSSE2]>;
//===----------------------------------------------------------------------===//
// SSE3 Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
def MOVSHDUPrr : S3SI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movshdup\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                VR128:$src, (undef),
                                                MOVSHDUP_shuffle_mask)))]>;
def MOVSHDUPrm : S3SI<0x16, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "movshdup\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                (memopv4f32 addr:$src), (undef),
                                                MOVSHDUP_shuffle_mask)))]>;

def MOVSLDUPrr : S3SI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movsldup\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                VR128:$src, (undef),
                                                MOVSLDUP_shuffle_mask)))]>;
def MOVSLDUPrm : S3SI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "movsldup\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                (memopv4f32 addr:$src), (undef),
                                                MOVSLDUP_shuffle_mask)))]>;

def MOVDDUPrr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "movddup\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (v2f64 (vector_shuffle
                                               VR128:$src, (undef),
                                               SSE_splat_lo_mask)))]>;
def MOVDDUPrm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                     "movddup\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2f64 (vector_shuffle
                               (scalar_to_vector (loadf64 addr:$src)),
                               (undef),
                               SSE_splat_lo_mask)))]>;
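
// movddup reads only the low 64-bit element and duplicates it into both
// lanes, which is why the memory form above loads f64mem rather than a full
// 128-bit operand.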
// Arithmetic
let Constraints = "$src1 = $dst" in {
  def ADDSUBPSrr : S3DI<0xD0, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "addsubps\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
                                           VR128:$src2))]>;
  def ADDSUBPSrm : S3DI<0xD0, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                        "addsubps\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
                                           (load addr:$src2)))]>;
  def ADDSUBPDrr : S3I<0xD0, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                       "addsubpd\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
                                          VR128:$src2))]>;
  def ADDSUBPDrm : S3I<0xD0, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                       "addsubpd\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
                                          (load addr:$src2)))]>;
}

def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "lddqu\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
// Horizontal ops
class S3D_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
  : S3DI<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         [(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
class S3D_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
  : S3DI<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         [(set VR128:$dst, (v4f32 (IntId VR128:$src1, (load addr:$src2))))]>;
class S3_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
  : S3I<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
        !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
        [(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
class S3_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
  : S3I<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
        !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
        [(set VR128:$dst, (v2f64 (IntId VR128:$src1, (load addr:$src2))))]>;

let Constraints = "$src1 = $dst" in {
  def HADDPSrr : S3D_Intrr<0x7C, "haddps", int_x86_sse3_hadd_ps>;
  def HADDPSrm : S3D_Intrm<0x7C, "haddps", int_x86_sse3_hadd_ps>;
  def HADDPDrr : S3_Intrr <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
  def HADDPDrm : S3_Intrm <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
  def HSUBPSrr : S3D_Intrr<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
  def HSUBPSrm : S3D_Intrm<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
  def HSUBPDrr : S3_Intrr <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
  def HSUBPDrm : S3_Intrm <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
}

// Thread synchronization
def MONITOR : I<0xC8, RawFrm, (outs), (ins), "monitor",
                [(int_x86_sse3_monitor EAX, ECX, EDX)]>, TB, Requires<[HasSSE3]>;
def MWAIT : I<0xC9, RawFrm, (outs), (ins), "mwait",
              [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
// vector_shuffle v1, <undef> <1, 1, 3, 3>
let AddedComplexity = 15 in
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                  MOVSHDUP_shuffle_mask)),
          (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
let AddedComplexity = 20 in
def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
                  MOVSHDUP_shuffle_mask)),
          (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;

// vector_shuffle v1, <undef> <0, 0, 2, 2>
let AddedComplexity = 15 in
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                  MOVSLDUP_shuffle_mask)),
          (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
let AddedComplexity = 20 in
def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
                  MOVSLDUP_shuffle_mask)),
          (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
//===----------------------------------------------------------------------===//
// SSSE3 Instructions
//===----------------------------------------------------------------------===//

/// SS3I_unop_rm_int_8 - Simple SSSE3 unary operator whose type is v*i8.
multiclass SS3I_unop_rm_int_8<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId64, Intrinsic IntId128> {
  def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst, (IntId64 VR64:$src))]>;

  def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst,
                     (IntId64 (bitconvert (memopv8i8 addr:$src))))]>;

  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>,
              OpSize;

  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv16i8 addr:$src))))]>, OpSize;
}

/// SS3I_unop_rm_int_16 - Simple SSSE3 unary operator whose type is v*i16.
multiclass SS3I_unop_rm_int_16<bits<8> opc, string OpcodeStr,
                               Intrinsic IntId64, Intrinsic IntId128> {
  def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                   (ins VR64:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst, (IntId64 VR64:$src))]>;

  def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                   (ins i64mem:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst,
                     (IntId64
                      (bitconvert (memopv4i16 addr:$src))))]>;

  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>,
              OpSize;

  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
}

/// SS3I_unop_rm_int_32 - Simple SSSE3 unary operator whose type is v*i32.
multiclass SS3I_unop_rm_int_32<bits<8> opc, string OpcodeStr,
                               Intrinsic IntId64, Intrinsic IntId128> {
  def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                   (ins VR64:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst, (IntId64 VR64:$src))]>;

  def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                   (ins i64mem:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst,
                     (IntId64
                      (bitconvert (memopv2i32 addr:$src))))]>;

  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>,
              OpSize;

  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv4i32 addr:$src))))]>, OpSize;
}

defm PABSB : SS3I_unop_rm_int_8 <0x1C, "pabsb",
                                 int_x86_ssse3_pabs_b,
                                 int_x86_ssse3_pabs_b_128>;
defm PABSW : SS3I_unop_rm_int_16<0x1D, "pabsw",
                                 int_x86_ssse3_pabs_w,
                                 int_x86_ssse3_pabs_w_128>;
defm PABSD : SS3I_unop_rm_int_32<0x1E, "pabsd",
                                 int_x86_ssse3_pabs_d,
                                 int_x86_ssse3_pabs_d_128>;
/// SS3I_binop_rm_int_8 - Simple SSSE3 binary operator whose type is v*i8.
let Constraints = "$src1 = $dst" in {
  multiclass SS3I_binop_rm_int_8<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId64, Intrinsic IntId128,
                                 bit Commutable = 0> {
    def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                     (ins VR64:$src1, VR64:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
      let isCommutable = Commutable;
    }
    def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                     (ins VR64:$src1, i64mem:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst,
                       (IntId64 VR64:$src1,
                        (bitconvert (memopv8i8 addr:$src2))))]>;

    def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                OpSize {
      let isCommutable = Commutable;
    }
    def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                      (ins VR128:$src1, i128mem:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst,
                        (IntId128 VR128:$src1,
                         (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
  }
}

/// SS3I_binop_rm_int_16 - Simple SSSE3 binary operator whose type is v*i16.
let Constraints = "$src1 = $dst" in {
  multiclass SS3I_binop_rm_int_16<bits<8> opc, string OpcodeStr,
                                  Intrinsic IntId64, Intrinsic IntId128,
                                  bit Commutable = 0> {
    def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                     (ins VR64:$src1, VR64:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
      let isCommutable = Commutable;
    }
    def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                     (ins VR64:$src1, i64mem:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst,
                       (IntId64 VR64:$src1,
                        (bitconvert (memopv4i16 addr:$src2))))]>;

    def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                OpSize {
      let isCommutable = Commutable;
    }
    def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                      (ins VR128:$src1, i128mem:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst,
                        (IntId128 VR128:$src1,
                         (bitconvert (memopv8i16 addr:$src2))))]>, OpSize;
  }
}

/// SS3I_binop_rm_int_32 - Simple SSSE3 binary operator whose type is v*i32.
let Constraints = "$src1 = $dst" in {
  multiclass SS3I_binop_rm_int_32<bits<8> opc, string OpcodeStr,
                                  Intrinsic IntId64, Intrinsic IntId128,
                                  bit Commutable = 0> {
    def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                     (ins VR64:$src1, VR64:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
      let isCommutable = Commutable;
    }
    def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                     (ins VR64:$src1, i64mem:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst,
                       (IntId64 VR64:$src1,
                        (bitconvert (memopv2i32 addr:$src2))))]>;

    def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                OpSize {
      let isCommutable = Commutable;
    }
    def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                      (ins VR128:$src1, i128mem:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst,
                        (IntId128 VR128:$src1,
                         (bitconvert (memopv4i32 addr:$src2))))]>, OpSize;
  }
}
defm PHADDW : SS3I_binop_rm_int_16<0x01, "phaddw",
                                   int_x86_ssse3_phadd_w,
                                   int_x86_ssse3_phadd_w_128, 1>;
defm PHADDD : SS3I_binop_rm_int_32<0x02, "phaddd",
                                   int_x86_ssse3_phadd_d,
                                   int_x86_ssse3_phadd_d_128, 1>;
defm PHADDSW : SS3I_binop_rm_int_16<0x03, "phaddsw",
                                    int_x86_ssse3_phadd_sw,
                                    int_x86_ssse3_phadd_sw_128, 1>;
defm PHSUBW : SS3I_binop_rm_int_16<0x05, "phsubw",
                                   int_x86_ssse3_phsub_w,
                                   int_x86_ssse3_phsub_w_128>;
defm PHSUBD : SS3I_binop_rm_int_32<0x06, "phsubd",
                                   int_x86_ssse3_phsub_d,
                                   int_x86_ssse3_phsub_d_128>;
defm PHSUBSW : SS3I_binop_rm_int_16<0x07, "phsubsw",
                                    int_x86_ssse3_phsub_sw,
                                    int_x86_ssse3_phsub_sw_128>;
defm PMADDUBSW : SS3I_binop_rm_int_8 <0x04, "pmaddubsw",
                                      int_x86_ssse3_pmadd_ub_sw,
                                      int_x86_ssse3_pmadd_ub_sw_128, 1>;
defm PMULHRSW : SS3I_binop_rm_int_16<0x0B, "pmulhrsw",
                                     int_x86_ssse3_pmul_hr_sw,
                                     int_x86_ssse3_pmul_hr_sw_128, 1>;
defm PSHUFB : SS3I_binop_rm_int_8 <0x00, "pshufb",
                                   int_x86_ssse3_pshuf_b,
                                   int_x86_ssse3_pshuf_b_128>;
defm PSIGNB : SS3I_binop_rm_int_8 <0x08, "psignb",
                                   int_x86_ssse3_psign_b,
                                   int_x86_ssse3_psign_b_128>;
defm PSIGNW : SS3I_binop_rm_int_16<0x09, "psignw",
                                   int_x86_ssse3_psign_w,
                                   int_x86_ssse3_psign_w_128>;
defm PSIGND : SS3I_binop_rm_int_32<0x0A, "psignd",
                                   int_x86_ssse3_psign_d,
                                   int_x86_ssse3_psign_d_128>;
let Constraints = "$src1 = $dst" in {
  def PALIGNR64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
                          (ins VR64:$src1, VR64:$src2, i16imm:$src3),
                          "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                          [(set VR64:$dst,
                            (int_x86_ssse3_palign_r
                             VR64:$src1, VR64:$src2,
                             imm:$src3))]>;
  def PALIGNR64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
                          (ins VR64:$src1, i64mem:$src2, i16imm:$src3),
                          "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                          [(set VR64:$dst,
                            (int_x86_ssse3_palign_r
                             VR64:$src1,
                             (bitconvert (memopv2i32 addr:$src2)),
                             imm:$src3))]>;

  def PALIGNR128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
                           (ins VR128:$src1, VR128:$src2, i32imm:$src3),
                           "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                           [(set VR128:$dst,
                             (int_x86_ssse3_palign_r_128
                              VR128:$src1, VR128:$src2,
                              imm:$src3))]>, OpSize;
  def PALIGNR128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
                           (ins VR128:$src1, i128mem:$src2, i32imm:$src3),
                           "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                           [(set VR128:$dst,
                             (int_x86_ssse3_palign_r_128
                              VR128:$src1,
                              (bitconvert (memopv4i32 addr:$src2)),
                              imm:$src3))]>, OpSize;
}
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// extload f32 -> f64.  This matches load+fextend because we have a hack in
// the isel (PreprocessForFPConvert) that can introduce loads after dag combine.
// Since these loads aren't folded into the fextend, we have to match it
// explicitly here.
let Predicates = [HasSSE2] in
def : Pat<(fextend (loadf32 addr:$src)),
          (CVTSS2SDrm addr:$src)>;
// bit_convert
let Predicates = [HasSSE2] in {
  def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}
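
// These bitconvert patterns select no machine instruction at all: every
// 128-bit vector type lives in the same VR128 register class, so a cast
// between them is a pure no-op at the register level.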
// Move scalar to XMM zero-extended
// movd to XMM register zero-extends
let AddedComplexity = 15 in {
// Zeroing a VR128 then do a MOVS{S|D} to the lower bits.
def : Pat<(v2f64 (X86zvmovl (v2f64 (scalar_to_vector FR64:$src)))),
          (MOVLSD2PDrr (V_SET0), FR64:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4f32 (X86zvmovl (v4f32 (scalar_to_vector FR32:$src)))),
          (MOVLSS2PSrr (V_SET0), FR32:$src)>, Requires<[HasSSE1]>;
}

// Splat v2f64 / v2i64
let AddedComplexity = 10 in {
def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
          (UNPCKLPDrr VR128:$src, VR128:$src)>,   Requires<[HasSSE2]>;
def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
          (UNPCKHPDrr VR128:$src, VR128:$src)>,   Requires<[HasSSE2]>;
def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
          (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
          (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}
// Special unary SHUFPSrri case.
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
                  SHUFP_unary_shuffle_mask:$sm)),
          (SHUFPSrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE1]>;
// Special unary SHUFPDrri case.
def : Pat<(v2f64 (vector_shuffle VR128:$src1, (undef),
                  SHUFP_unary_shuffle_mask:$sm)),
          (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Unary v4f32 shuffle with PSHUF* in order to fold a load.
def : Pat<(vector_shuffle (bc_v4i32 (memopv4f32 addr:$src1)), (undef),
           SHUFP_unary_shuffle_mask:$sm),
          (PSHUFDmi addr:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Special binary v4i32 shuffle cases with SHUFPS.
def : Pat<(v4i32 (vector_shuffle VR128:$src1, (v4i32 VR128:$src2),
                  PSHUFD_binary_shuffle_mask:$sm)),
          (SHUFPSrri VR128:$src1, VR128:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1,
                  (bc_v4i32 (memopv2i64 addr:$src2)), PSHUFD_binary_shuffle_mask:$sm)),
          (SHUFPSrmi VR128:$src1, addr:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Special binary v2i64 shuffle cases using SHUFPDrri.
def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
                  SHUFP_shuffle_mask:$sm)),
          (SHUFPDrri VR128:$src1, VR128:$src2, SHUFP_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Special unary SHUFPDrri case.
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (undef),
                  SHUFP_unary_shuffle_mask:$sm)),
          (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
let AddedComplexity = 10 in {
def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
                  UNPCKL_v_undef_shuffle_mask)),
          (UNPCKLPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
                  UNPCKL_v_undef_shuffle_mask)),
          (PUNPCKLBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
                  UNPCKL_v_undef_shuffle_mask)),
          (PUNPCKLWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                  UNPCKL_v_undef_shuffle_mask)),
          (PUNPCKLDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}
// vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
let AddedComplexity = 10 in {
def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
                  UNPCKH_v_undef_shuffle_mask)),
          (UNPCKHPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
                  UNPCKH_v_undef_shuffle_mask)),
          (PUNPCKHBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
                  UNPCKH_v_undef_shuffle_mask)),
          (PUNPCKHWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                  UNPCKH_v_undef_shuffle_mask)),
          (PUNPCKHDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}
let AddedComplexity = 15 in {
// vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVHP_shuffle_mask)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;

// vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVHLPS_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;

// vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
                  MOVHLPS_v_undef_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src1)>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1, (undef),
                  MOVHLPS_v_undef_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src1)>;
}
let AddedComplexity = 20 in {
// vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
// vector_shuffle v1, (load v2) <0, 1, 4, 5> using MOVHPS
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memopv4f32 addr:$src2),
                  MOVLP_shuffle_mask)),
          (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memopv2f64 addr:$src2),
                  MOVLP_shuffle_mask)),
          (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memopv4f32 addr:$src2),
                  MOVHP_shuffle_mask)),
          (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memopv2f64 addr:$src2),
                  MOVHP_shuffle_mask)),
          (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;

def : Pat<(v4i32 (vector_shuffle VR128:$src1,
                  (bc_v4i32 (memopv2i64 addr:$src2)), MOVLP_shuffle_mask)),
          (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memopv2i64 addr:$src2),
                  MOVLP_shuffle_mask)),
          (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1,
                  (bc_v4i32 (memopv2i64 addr:$src2)), MOVHP_shuffle_mask)),
          (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memopv2i64 addr:$src2),
                  MOVHP_shuffle_mask)),
          (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
}
let AddedComplexity = 15 in {
// Setting the lowest element in the vector.
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVL_shuffle_mask)),
          (MOVLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVL_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;

// vector_shuffle v1, v2 <4, 5, 2, 3> using MOVLPDrr (movsd)
def : Pat<(v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVLP_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVLP_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
}
// Set lowest element and zero upper elements.
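// MOVZPQILo2PQIrr is the movq xmm, xmm form: it copies the low quadword and
// zeroes the upper one.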
let AddedComplexity = 15 in
def : Pat<(v2f64 (vector_shuffle immAllZerosV_bc, VR128:$src,
                  MOVL_shuffle_mask)),
          (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v2f64 (X86zvmovl (v2f64 VR128:$src))),
          (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
// FIXME: Temporary workaround since 2-wide shuffle is broken.
def : Pat<(int_x86_sse2_movs_d VR128:$src1, VR128:$src2),
          (v2f64 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_loadh_pd VR128:$src1, addr:$src2),
          (v2f64 (MOVHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_loadl_pd VR128:$src1, addr:$src2),
          (v2f64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, VR128:$src2, imm:$src3),
          (v2f64 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$src3))>,
      Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, (load addr:$src2), imm:$src3),
          (v2f64 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$src3))>,
      Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, VR128:$src2),
          (v2f64 (UNPCKHPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, (load addr:$src2)),
          (v2f64 (UNPCKHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, VR128:$src2),
          (v2f64 (UNPCKLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, (load addr:$src2)),
          (v2f64 (UNPCKLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, VR128:$src2),
          (v2i64 (PUNPCKHQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, (load addr:$src2)),
          (v2i64 (PUNPCKHQDQrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, VR128:$src2),
          (v2i64 (PUNPCKLQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, (load addr:$src2)),
          (PUNPCKLQDQrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
// Some special case pandn patterns.
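// There is no vector 'not', so match (and (xor x, all-ones), y) once per
// integer vector type of the bitcasted all-ones constant.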
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;

def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
                  (memopv2i64 addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
                  (memopv2i64 addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
                  (memopv2i64 addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
// vector -> vector casts
def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
          (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
          (Int_CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
// Use movaps / movups for SSE integer load / store (one byte shorter).
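// movdqa / movdqu carry a 0x66 prefix that movaps / movups lack; for a plain
// load or store the FP forms are semantically identical.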
def : Pat<(alignedloadv4i32 addr:$src),
          (MOVAPSrm addr:$src)>, Requires<[HasSSE1]>;
def : Pat<(loadv4i32 addr:$src),
          (MOVUPSrm addr:$src)>, Requires<[HasSSE1]>;
def : Pat<(alignedloadv2i64 addr:$src),
          (MOVAPSrm addr:$src)>, Requires<[HasSSE2]>;
def : Pat<(loadv2i64 addr:$src),
          (MOVUPSrm addr:$src)>, Requires<[HasSSE2]>;

def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v2i64 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v4i32 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v8i16 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v16i8 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
//===----------------------------------------------------------------------===//
// SSE4.1 Instructions
//===----------------------------------------------------------------------===//
multiclass sse41_fp_unop_rm<bits<8> opcss, bits<8> opcps,
                            bits<8> opcsd, bits<8> opcpd,
                            string OpcodeStr,
                            Intrinsic F32Int,
                            Intrinsic V4F32Int,
                            Intrinsic F64Int,
                            Intrinsic V2F64Int> {
  // Intrinsic operation, reg.
  def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set VR128:$dst, (F32Int VR128:$src1, imm:$src2))]>,
                    OpSize;

  // Intrinsic operation, mem.
  def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
                    (outs VR128:$dst), (ins ssmem:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set VR128:$dst, (F32Int sse_load_f32:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, reg
  def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set VR128:$dst, (V4F32Int VR128:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, mem
  def PSm_Int : SS4AIi8<opcps, MRMSrcMem,
                    (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set VR128:$dst, (V4F32Int (load addr:$src1), imm:$src2))]>,
                    OpSize;

  // Intrinsic operation, reg.
  def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set VR128:$dst, (F64Int VR128:$src1, imm:$src2))]>,
                    OpSize;

  // Intrinsic operation, mem.
  def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
                    (outs VR128:$dst), (ins sdmem:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set VR128:$dst, (F64Int sse_load_f64:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, reg
  def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set VR128:$dst, (V2F64Int VR128:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, mem
  def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
                    (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set VR128:$dst, (V2F64Int (load addr:$src1), imm:$src2))]>,
                    OpSize;
}

// FP round - roundss, roundps, roundsd, roundpd
defm ROUND : sse41_fp_unop_rm<0x0A, 0x08, 0x0B, 0x09, "round",
                              int_x86_sse41_round_ss, int_x86_sse41_round_ps,
                              int_x86_sse41_round_sd, int_x86_sse41_round_pd>;
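// defm concatenates names, so this expands to ROUNDSSr_Int, ROUNDSSm_Int,
// ROUNDPSr_Int, ..., ROUNDPDm_Int.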
// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId128> {
  def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
  def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
}

defm PHMINPOSUW : SS41I_unop_rm_int_v16<0x41, "phminposuw",
                                        int_x86_sse41_phminposuw>;
/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
let Constraints = "$src1 = $dst" in {
  multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
                                Intrinsic IntId128, bit Commutable = 0> {
    def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, VR128:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                   OpSize {
      let isCommutable = Commutable;
    }
    def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i128mem:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR128:$dst,
                     (IntId128 VR128:$src1,
                      (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
  }
}
defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq",
                                  int_x86_sse41_pcmpeqq, 1>;
defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw",
                                   int_x86_sse41_packusdw, 0>;
defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb",
                                 int_x86_sse41_pminsb, 1>;
defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd",
                                 int_x86_sse41_pminsd, 1>;
defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud",
                                 int_x86_sse41_pminud, 1>;
defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw",
                                 int_x86_sse41_pminuw, 1>;
defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb",
                                 int_x86_sse41_pmaxsb, 1>;
defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd",
                                 int_x86_sse41_pmaxsd, 1>;
defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud",
                                 int_x86_sse41_pmaxud, 1>;
defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw",
                                 int_x86_sse41_pmaxuw, 1>;
defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq",
                                 int_x86_sse41_pmuldq, 1>;
/// SS41I_binop_patint - SSE 4.1 binary operator with both a generic SDNode
/// pattern and an intrinsic form
let Constraints = "$src1 = $dst" in {
  multiclass SS41I_binop_patint<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                Intrinsic IntId128, bit Commutable = 0> {
    def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, VR128:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR128:$dst, (OpNode (v4i32 VR128:$src1),
                                             VR128:$src2))]>, OpSize {
      let isCommutable = Commutable;
    }
    def rr_int : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                       (ins VR128:$src1, VR128:$src2),
                       !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                       OpSize {
      let isCommutable = Commutable;
    }
    def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i128mem:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR128:$dst,
                     (OpNode VR128:$src1, (memopv4i32 addr:$src2)))]>, OpSize;
    def rm_int : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                       (ins VR128:$src1, i128mem:$src2),
                       !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                       [(set VR128:$dst,
                         (IntId128 VR128:$src1, (memopv4i32 addr:$src2)))]>,
                       OpSize;
  }
}

defm PMULLD : SS41I_binop_patint<0x40, "pmulld", mul,
                                 int_x86_sse41_pmulld, 1>;
/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
let Constraints = "$src1 = $dst" in {
  multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId128, bit Commutable = 0> {
    def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
                      !strconcat(OpcodeStr,
                       "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                      [(set VR128:$dst,
                        (IntId128 VR128:$src1, VR128:$src2, imm:$src3))]>,
                      OpSize {
      let isCommutable = Commutable;
    }
    def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                      (ins VR128:$src1, i128mem:$src2, i32i8imm:$src3),
                      !strconcat(OpcodeStr,
                       "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                      [(set VR128:$dst,
                        (IntId128 VR128:$src1,
                         (bitconvert (memopv16i8 addr:$src2)), imm:$src3))]>,
                      OpSize;
  }
}

defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps",
                                   int_x86_sse41_blendps, 0>;
defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd",
                                   int_x86_sse41_blendpd, 0>;
defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw",
                                   int_x86_sse41_pblendw, 0>;
defm DPPS : SS41I_binop_rmi_int<0x40, "dpps",
                                int_x86_sse41_dpps, 1>;
defm DPPD : SS41I_binop_rmi_int<0x41, "dppd",
                                int_x86_sse41_dppd, 1>;
defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw",
                                   int_x86_sse41_mpsadbw, 0>;
/// SS41I_ternary_int - SSE 4.1 ternary operator
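// The blendv* mask operand is implicitly XMM0 (hence Uses = [XMM0] below);
// the assembly string still prints %xmm0 explicitly.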
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
  multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
    def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr,
                     "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
                    [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
                    OpSize;

    def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2),
                    !strconcat(OpcodeStr,
                     "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
                    [(set VR128:$dst,
                      (IntId VR128:$src1,
                       (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
  }
}

defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
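// Sign- / zero-extending moves. The _int8 / _int4 / _int2 suffix is the width
// in bytes of the memory source (i64mem / i32mem / i16mem).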
multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst,
                   (IntId (bitconvert (v4i32 (load addr:$src)))))]>, OpSize;
}

defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst,
                   (IntId (bitconvert (v4i32 (load addr:$src)))))]>, OpSize;
}

defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst,
                   (IntId (bitconvert (v4i32 (load addr:$src)))))]>, OpSize;
}

defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
/// SS41I_extract8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
                 OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 []>, OpSize;
// There's an AssertZext in the way of writing the store pattern
// (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}

defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 []>, OpSize;
// There's an AssertZext in the way of writing the store pattern
// (store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))), addr:$dst)
}

defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst,
                   (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
                         addr:$dst)]>, OpSize;
}

defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
/// SS41I_extractf32 - SSE 4.1 extract a 32-bit fp value to int reg or memory
/// destination
multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst,
                   (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
                 OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
                         addr:$dst)]>, OpSize;
}

defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
let Constraints = "$src1 = $dst" in {
  multiclass SS41I_insert8<bits<8> opc, string OpcodeStr> {
    def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
                   !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set VR128:$dst,
                     (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
    def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
                   !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set VR128:$dst,
                     (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
                                imm:$src3))]>, OpSize;
  }
}

defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
let Constraints = "$src1 = $dst" in {
  multiclass SS41I_insert32<bits<8> opc, string OpcodeStr> {
    def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
                   !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set VR128:$dst,
                     (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
                   OpSize;
    def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
                   !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set VR128:$dst,
                     (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
                                       imm:$src3)))]>, OpSize;
  }
}

defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
let Constraints = "$src1 = $dst" in {
  multiclass SS41I_insertf32<bits<8> opc, string OpcodeStr> {
    def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, FR32:$src2, i32i8imm:$src3),
                   !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set VR128:$dst,
                     (X86insrtps VR128:$src1, FR32:$src2, imm:$src3))]>, OpSize;
    def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
                   !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set VR128:$dst,
                     (X86insrtps VR128:$src1, (loadf32 addr:$src2),
                                 imm:$src3))]>, OpSize;
  }
}

defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
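// ptest only writes EFLAGS (ZF/CF) and has no register result, so the
// patterns below are empty.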
let Defs = [EFLAGS] in {
def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                    "ptest\t{$src2, $src1|$src1, $src2}", []>, OpSize;
def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
                    "ptest\t{$src2, $src1|$src1, $src2}", []>, OpSize;
}
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "movntdqa\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>;