1 //====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions
11 // and the instruction properties that are needed for code generation, machine
12 // code emission, and analysis.
14 //===----------------------------------------------------------------------===//
17 //===----------------------------------------------------------------------===//
18 // SSE specific DAG Nodes.
19 //===----------------------------------------------------------------------===//
21 def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
22 SDTCisFP<0>, SDTCisInt<2> ]>;
24 def X86fmin : SDNode<"X86ISD::FMIN", SDTFPBinOp>;
25 def X86fmax : SDNode<"X86ISD::FMAX", SDTFPBinOp>;
26 def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp,
27 [SDNPCommutative, SDNPAssociative]>;
28 def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp,
29 [SDNPCommutative, SDNPAssociative]>;
30 def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp,
31 [SDNPCommutative, SDNPAssociative]>;
32 def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
33 def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
34 def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
35 def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
36 def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
37 def X86pextrb : SDNode<"X86ISD::PEXTRB",
38 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
39 def X86pextrw : SDNode<"X86ISD::PEXTRW",
40 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
41 def X86pinsrb : SDNode<"X86ISD::PINSRB",
42 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
43 SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
44 def X86pinsrw : SDNode<"X86ISD::PINSRW",
45 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
46 SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
47 def X86insrtps : SDNode<"X86ISD::INSERTPS",
48 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
49 SDTCisVT<2, f32>, SDTCisPtrTy<3>]>>;
50 def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
51 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
52 def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
53 [SDNPHasChain, SDNPMayLoad]>;
55 //===----------------------------------------------------------------------===//
56 // SSE Complex Patterns
57 //===----------------------------------------------------------------------===//
59 // These are 'extloads' from a scalar to the low element of a vector, zeroing
60 // the top elements. These are used for the SSE 'ss' and 'sd' instruction
61 // fragments.
62 def sse_load_f32 : ComplexPattern<v4f32, 4, "SelectScalarSSELoad", [],
63 [SDNPHasChain, SDNPMayLoad]>;
64 def sse_load_f64 : ComplexPattern<v2f64, 4, "SelectScalarSSELoad", [],
65 [SDNPHasChain, SDNPMayLoad]>;
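// For example (a sketch only, mirroring the *SSrm_Int defs later in this file
// rather than adding a new one), an intrinsic memory form matches the complex
// pattern in place of a plain load:
//   [(set VR128:$dst, (int_x86_sse_add_ss VR128:$src1, sse_load_f32:$src2))]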
67 def ssmem : Operand<v4f32> {
68 let PrintMethod = "printf32mem";
69 let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
70 }
71 def sdmem : Operand<v2f64> {
72 let PrintMethod = "printf64mem";
73 let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
74 }
76 //===----------------------------------------------------------------------===//
77 // SSE pattern fragments
78 //===----------------------------------------------------------------------===//
80 def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
81 def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
82 def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
83 def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
85 // Like 'store', but always requires vector alignment.
86 def alignedstore : PatFrag<(ops node:$val, node:$ptr),
87 (st node:$val, node:$ptr), [{
88 if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
89 return !ST->isTruncatingStore() &&
90 ST->getAddressingMode() == ISD::UNINDEXED &&
91 ST->getAlignment() >= 16;
92 return false;
93 }]>;
95 // Like 'load', but always requires vector alignment.
96 def alignedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
97 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
98 return LD->getExtensionType() == ISD::NON_EXTLOAD &&
99 LD->getAddressingMode() == ISD::UNINDEXED &&
100 LD->getAlignment() >= 16;
101 return false;
102 }]>;
104 def alignedloadfsf32 : PatFrag<(ops node:$ptr), (f32 (alignedload node:$ptr))>;
105 def alignedloadfsf64 : PatFrag<(ops node:$ptr), (f64 (alignedload node:$ptr))>;
106 def alignedloadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (alignedload node:$ptr))>;
107 def alignedloadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (alignedload node:$ptr))>;
108 def alignedloadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (alignedload node:$ptr))>;
109 def alignedloadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (alignedload node:$ptr))>;
111 // Like 'load', but uses special alignment checks suitable for use in
112 // memory operands in most SSE instructions, which are required to
113 // be naturally aligned on some targets but not on others.
114 // FIXME: Actually implement support for targets that don't require the
115 // alignment. This probably wants a subtarget predicate.
116 def memop : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
117 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
118 return LD->getExtensionType() == ISD::NON_EXTLOAD &&
119 LD->getAddressingMode() == ISD::UNINDEXED &&
120 LD->getAlignment() >= 16;
121 return false;
122 }]>;
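// A possible shape for the FIXME above - a hypothetical sketch only; the
// subtarget query used here does not exist in this revision:
// def memop_relaxed : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
//   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
//     return LD->getExtensionType() == ISD::NON_EXTLOAD &&
//            LD->getAddressingMode() == ISD::UNINDEXED &&
//            (!Subtarget->requiresVectorAlignment() || LD->getAlignment() >= 16);
//   return false;
// }]>;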
124 def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
125 def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
126 def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
127 def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
128 def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
129 def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
130 def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
132 // SSSE3 uses MMX registers for some instructions. They aren't aligned on a
133 // 16-byte boundary.
134 // FIXME: 8 byte alignment for mmx reads is not required
135 def memop64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
136 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
137 return LD->getExtensionType() == ISD::NON_EXTLOAD &&
138 LD->getAddressingMode() == ISD::UNINDEXED &&
139 LD->getAlignment() >= 8;
140 return false;
141 }]>;
143 def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;
144 def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
145 def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
146 def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;
148 def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
149 def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
150 def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
151 def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
152 def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
153 def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
155 def fp32imm0 : PatLeaf<(f32 fpimm), [{
156 return N->isExactlyValue(+0.0);
157 }]>;
159 def PSxLDQ_imm : SDNodeXForm<imm, [{
160 // Transformation function: imm >> 3
161 return getI32Imm(N->getValue() >> 3);
162 }]>;
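// Worked example: a v2i64 shift amount of 64 (bits) becomes a PS{L,R}LDQ byte
// immediate of 64 >> 3 = 8.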
164 def SSE_CC_imm : SDNodeXForm<cond, [{
165 unsigned Val;
166 switch (N->get()) {
167 default: Val = 0; assert(0 && "Unexpected CondCode"); break;
168 case ISD::SETOEQ: Val = 0; break;
169 case ISD::SETOLT: Val = 1; break;
170 case ISD::SETOLE: Val = 2; break;
171 case ISD::SETUO: Val = 3; break;
172 case ISD::SETONE: Val = 4; break;
173 case ISD::SETOGE: Val = 5; break;
174 case ISD::SETOGT: Val = 6; break;
175 case ISD::SETO: Val = 7; break;
176 }
177 return getI8Imm(Val);
178 }]>;
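// Worked example: (vsetcc a, b, SETOLT) yields immediate 1, i.e. the
// cmpltps/cmpltss encoding; see the CMPPS selection patterns later in this file.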
180 // SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
181 // SHUFP* etc. imm.
182 def SHUFFLE_get_shuf_imm : SDNodeXForm<build_vector, [{
183 return getI8Imm(X86::getShuffleSHUFImmediate(N));
184 }]>;
186 // SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
187 // PSHUFHW imm.
188 def SHUFFLE_get_pshufhw_imm : SDNodeXForm<build_vector, [{
189 return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
190 }]>;
192 // SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
193 // PSHUFLW imm.
194 def SHUFFLE_get_pshuflw_imm : SDNodeXForm<build_vector, [{
195 return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
196 }]>;
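// Worked example of the 8-bit shuffle immediate these xforms produce: the mask
// <3,2,1,0> (reverse all four elements) packs two bits per result element,
// lowest element first, giving 0b00011011 = 0x1B.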
198 def SSE_splat_mask : PatLeaf<(build_vector), [{
199 return X86::isSplatMask(N);
200 }], SHUFFLE_get_shuf_imm>;
202 def SSE_splat_lo_mask : PatLeaf<(build_vector), [{
203 return X86::isSplatLoMask(N);
204 }], SHUFFLE_get_shuf_imm>;
206 def MOVHLPS_shuffle_mask : PatLeaf<(build_vector), [{
207 return X86::isMOVHLPSMask(N);
208 }]>;
210 def MOVHLPS_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
211 return X86::isMOVHLPS_v_undef_Mask(N);
212 }]>;
214 def MOVHP_shuffle_mask : PatLeaf<(build_vector), [{
215 return X86::isMOVHPMask(N);
216 }]>;
218 def MOVLP_shuffle_mask : PatLeaf<(build_vector), [{
219 return X86::isMOVLPMask(N);
220 }]>;
222 def MOVL_shuffle_mask : PatLeaf<(build_vector), [{
223 return X86::isMOVLMask(N);
224 }]>;
226 def MOVSHDUP_shuffle_mask : PatLeaf<(build_vector), [{
227 return X86::isMOVSHDUPMask(N);
228 }]>;
230 def MOVSLDUP_shuffle_mask : PatLeaf<(build_vector), [{
231 return X86::isMOVSLDUPMask(N);
232 }]>;
234 def UNPCKL_shuffle_mask : PatLeaf<(build_vector), [{
235 return X86::isUNPCKLMask(N);
236 }]>;
238 def UNPCKH_shuffle_mask : PatLeaf<(build_vector), [{
239 return X86::isUNPCKHMask(N);
240 }]>;
242 def UNPCKL_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
243 return X86::isUNPCKL_v_undef_Mask(N);
244 }]>;
246 def UNPCKH_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
247 return X86::isUNPCKH_v_undef_Mask(N);
248 }]>;
250 def PSHUFD_shuffle_mask : PatLeaf<(build_vector), [{
251 return X86::isPSHUFDMask(N);
252 }], SHUFFLE_get_shuf_imm>;
254 def PSHUFHW_shuffle_mask : PatLeaf<(build_vector), [{
255 return X86::isPSHUFHWMask(N);
256 }], SHUFFLE_get_pshufhw_imm>;
258 def PSHUFLW_shuffle_mask : PatLeaf<(build_vector), [{
259 return X86::isPSHUFLWMask(N);
260 }], SHUFFLE_get_pshuflw_imm>;
262 def SHUFP_unary_shuffle_mask : PatLeaf<(build_vector), [{
263 return X86::isPSHUFDMask(N);
264 }], SHUFFLE_get_shuf_imm>;
266 def SHUFP_shuffle_mask : PatLeaf<(build_vector), [{
267 return X86::isSHUFPMask(N);
268 }], SHUFFLE_get_shuf_imm>;
270 def PSHUFD_binary_shuffle_mask : PatLeaf<(build_vector), [{
271 return X86::isSHUFPMask(N);
272 }], SHUFFLE_get_shuf_imm>;
275 //===----------------------------------------------------------------------===//
276 // SSE scalar FP Instructions
277 //===----------------------------------------------------------------------===//
279 // CMOV* - Used to implement the SSE SELECT DAG operation. Expanded by the
280 // scheduler into a branch sequence (see the expansion sketch after this
281 // block); they are pseudo instructions with a custom scheduler inserter.
282 let Uses = [EFLAGS], usesCustomDAGSchedInserter = 1 in {
283 def CMOV_FR32 : I<0, Pseudo,
284 (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
285 "#CMOV_FR32 PSEUDO!",
286 [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
287 EFLAGS))]>;
288 def CMOV_FR64 : I<0, Pseudo,
289 (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
290 "#CMOV_FR64 PSEUDO!",
291 [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
292 EFLAGS))]>;
293 def CMOV_V4F32 : I<0, Pseudo,
294 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
295 "#CMOV_V4F32 PSEUDO!",
296 [(set VR128:$dst,
297 (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
298 EFLAGS)))]>;
299 def CMOV_V2F64 : I<0, Pseudo,
300 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
301 "#CMOV_V2F64 PSEUDO!",
302 [(set VR128:$dst,
303 (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
304 EFLAGS)))]>;
305 def CMOV_V2I64 : I<0, Pseudo,
306 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
307 "#CMOV_V2I64 PSEUDO!",
308 [(set VR128:$dst,
309 (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
310 EFLAGS)))]>;
311 }
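// Rough sketch of the custom-inserter expansion for the CMOV_* pseudos above
// (the real lowering lives in X86ISelLowering.cpp; block names are
// illustrative only):
//   thisMBB:  JCC sinkMBB            ; branch on $cond, reading EFLAGS
//   copy0MBB: (fall through)         ; path that yields $f
//   sinkMBB:  $dst = phi [$f, copy0MBB], [$t, thisMBB]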
313 //===----------------------------------------------------------------------===//
314 // SSE1 Instructions
315 //===----------------------------------------------------------------------===//
317 // Move Instructions
318 let neverHasSideEffects = 1 in
319 def MOVSSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
320 "movss\t{$src, $dst|$dst, $src}", []>;
321 let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
322 def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
323 "movss\t{$src, $dst|$dst, $src}",
324 [(set FR32:$dst, (loadf32 addr:$src))]>;
325 def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
326 "movss\t{$src, $dst|$dst, $src}",
327 [(store FR32:$src, addr:$dst)]>;
329 // Conversion instructions
330 def CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
331 "cvttss2si\t{$src, $dst|$dst, $src}",
332 [(set GR32:$dst, (fp_to_sint FR32:$src))]>;
333 def CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
334 "cvttss2si\t{$src, $dst|$dst, $src}",
335 [(set GR32:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
336 def CVTSI2SSrr : SSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
337 "cvtsi2ss\t{$src, $dst|$dst, $src}",
338 [(set FR32:$dst, (sint_to_fp GR32:$src))]>;
339 def CVTSI2SSrm : SSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
340 "cvtsi2ss\t{$src, $dst|$dst, $src}",
341 [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
343 // Match intrinsics which expect XMM operand(s).
344 def Int_CVTSS2SIrr : SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
345 "cvtss2si\t{$src, $dst|$dst, $src}",
346 [(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>;
347 def Int_CVTSS2SIrm : SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
348 "cvtss2si\t{$src, $dst|$dst, $src}",
349 [(set GR32:$dst, (int_x86_sse_cvtss2si
350 (load addr:$src)))]>;
352 // Match intrinsics which expect MM and XMM operand(s).
353 def Int_CVTPS2PIrr : PSI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
354 "cvtps2pi\t{$src, $dst|$dst, $src}",
355 [(set VR64:$dst, (int_x86_sse_cvtps2pi VR128:$src))]>;
356 def Int_CVTPS2PIrm : PSI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
357 "cvtps2pi\t{$src, $dst|$dst, $src}",
358 [(set VR64:$dst, (int_x86_sse_cvtps2pi
359 (load addr:$src)))]>;
360 def Int_CVTTPS2PIrr: PSI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
361 "cvttps2pi\t{$src, $dst|$dst, $src}",
362 [(set VR64:$dst, (int_x86_sse_cvttps2pi VR128:$src))]>;
363 def Int_CVTTPS2PIrm: PSI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
364 "cvttps2pi\t{$src, $dst|$dst, $src}",
365 [(set VR64:$dst, (int_x86_sse_cvttps2pi
366 (load addr:$src)))]>;
367 let Constraints = "$src1 = $dst" in {
368 def Int_CVTPI2PSrr : PSI<0x2A, MRMSrcReg,
369 (outs VR128:$dst), (ins VR128:$src1, VR64:$src2),
370 "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
371 [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
372 VR64:$src2))]>;
373 def Int_CVTPI2PSrm : PSI<0x2A, MRMSrcMem,
374 (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
375 "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
376 [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
377 (load addr:$src2)))]>;
378 }
380 // Aliases for intrinsics
381 def Int_CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
382 "cvttss2si\t{$src, $dst|$dst, $src}",
384 (int_x86_sse_cvttss2si VR128:$src))]>;
385 def Int_CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
386 "cvttss2si\t{$src, $dst|$dst, $src}",
388 (int_x86_sse_cvttss2si(load addr:$src)))]>;
390 let Constraints = "$src1 = $dst" in {
391 def Int_CVTSI2SSrr : SSI<0x2A, MRMSrcReg,
392 (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
393 "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
394 [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
395 GR32:$src2))]>;
396 def Int_CVTSI2SSrm : SSI<0x2A, MRMSrcMem,
397 (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
398 "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
399 [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
400 (loadi32 addr:$src2)))]>;
401 }
403 // Comparison instructions
404 let Constraints = "$src1 = $dst" in {
405 let neverHasSideEffects = 1 in
406 def CMPSSrr : SSIi8<0xC2, MRMSrcReg,
407 (outs FR32:$dst), (ins FR32:$src1, FR32:$src, SSECC:$cc),
408 "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
409 let neverHasSideEffects = 1, mayLoad = 1 in
410 def CMPSSrm : SSIi8<0xC2, MRMSrcMem,
411 (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, SSECC:$cc),
412 "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
415 let Defs = [EFLAGS] in {
416 def UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2),
417 "ucomiss\t{$src2, $src1|$src1, $src2}",
418 [(X86cmp FR32:$src1, FR32:$src2), (implicit EFLAGS)]>;
419 def UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2),
420 "ucomiss\t{$src2, $src1|$src1, $src2}",
421 [(X86cmp FR32:$src1, (loadf32 addr:$src2)),
422 (implicit EFLAGS)]>;
423 }
425 // Aliases to match intrinsics which expect XMM operand(s).
426 let Constraints = "$src1 = $dst" in {
427 def Int_CMPSSrr : SSIi8<0xC2, MRMSrcReg,
428 (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
429 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
430 [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
431 VR128:$src, imm:$cc))]>;
432 def Int_CMPSSrm : SSIi8<0xC2, MRMSrcMem,
433 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src, SSECC:$cc),
434 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
435 [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
436 (load addr:$src), imm:$cc))]>;
437 }
439 let Defs = [EFLAGS] in {
440 def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs),
441 (ins VR128:$src1, VR128:$src2),
442 "ucomiss\t{$src2, $src1|$src1, $src2}",
443 [(X86ucomi (v4f32 VR128:$src1), VR128:$src2),
444 (implicit EFLAGS)]>;
445 def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs),
446 (ins VR128:$src1, f128mem:$src2),
447 "ucomiss\t{$src2, $src1|$src1, $src2}",
448 [(X86ucomi (v4f32 VR128:$src1), (load addr:$src2)),
449 (implicit EFLAGS)]>;
451 def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs),
452 (ins VR128:$src1, VR128:$src2),
453 "comiss\t{$src2, $src1|$src1, $src2}",
454 [(X86comi (v4f32 VR128:$src1), VR128:$src2),
455 (implicit EFLAGS)]>;
456 def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs),
457 (ins VR128:$src1, f128mem:$src2),
458 "comiss\t{$src2, $src1|$src1, $src2}",
459 [(X86comi (v4f32 VR128:$src1), (load addr:$src2)),
460 (implicit EFLAGS)]>;
461 }
463 // Aliases of packed SSE1 instructions for scalar use. These all have names that
464 // start with 'Fs'.
466 // Alias instructions that map fld0 to pxor for sse.
467 let isReMaterializable = 1 in
468 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins),
469 "pxor\t$dst, $dst", [(set FR32:$dst, fp32imm0)]>,
470 Requires<[HasSSE1]>, TB, OpSize;
472 // Alias instruction to do FR32 reg-to-reg copy using movaps. Upper bits are
473 // disregarded since they're always zero.
474 let neverHasSideEffects = 1 in
475 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
476 "movaps\t{$src, $dst|$dst, $src}", []>;
478 // Alias instruction to load FR32 from f128mem using movaps. Upper bits are
479 // disregarded since they're always zero.
480 let isSimpleLoad = 1 in
481 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
482 "movaps\t{$src, $dst|$dst, $src}",
483 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
485 // Alias bitwise logical operations using SSE logical ops on packed FP values.
486 let Constraints = "$src1 = $dst" in {
487 let isCommutable = 1 in {
488 def FsANDPSrr : PSI<0x54, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
489 "andps\t{$src2, $dst|$dst, $src2}",
490 [(set FR32:$dst, (X86fand FR32:$src1, FR32:$src2))]>;
491 def FsORPSrr : PSI<0x56, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
492 "orps\t{$src2, $dst|$dst, $src2}",
493 [(set FR32:$dst, (X86for FR32:$src1, FR32:$src2))]>;
494 def FsXORPSrr : PSI<0x57, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
495 "xorps\t{$src2, $dst|$dst, $src2}",
496 [(set FR32:$dst, (X86fxor FR32:$src1, FR32:$src2))]>;
497 }
499 def FsANDPSrm : PSI<0x54, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
500 "andps\t{$src2, $dst|$dst, $src2}",
501 [(set FR32:$dst, (X86fand FR32:$src1,
502 (memopfsf32 addr:$src2)))]>;
503 def FsORPSrm : PSI<0x56, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
504 "orps\t{$src2, $dst|$dst, $src2}",
505 [(set FR32:$dst, (X86for FR32:$src1,
506 (memopfsf32 addr:$src2)))]>;
507 def FsXORPSrm : PSI<0x57, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
508 "xorps\t{$src2, $dst|$dst, $src2}",
509 [(set FR32:$dst, (X86fxor FR32:$src1,
510 (memopfsf32 addr:$src2)))]>;
511 let neverHasSideEffects = 1 in {
512 def FsANDNPSrr : PSI<0x55, MRMSrcReg,
513 (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
514 "andnps\t{$src2, $dst|$dst, $src2}", []>;
517 def FsANDNPSrm : PSI<0x55, MRMSrcMem,
518 (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
519 "andnps\t{$src2, $dst|$dst, $src2}", []>;
523 /// basic_sse1_fp_binop_rm - SSE1 binops come in both scalar and vector forms.
525 /// In addition, we also have a special variant of the scalar form here to
526 /// represent the associated intrinsic operation. This form is unlike the
527 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
528 /// and leaves the top elements undefined.
530 /// These three forms can each be reg+reg or reg+mem, so there are a total of
531 /// six "instructions".
533 let Constraints = "$src1 = $dst" in {
534 multiclass basic_sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
535 SDNode OpNode, Intrinsic F32Int,
536 bit Commutable = 0> {
537 // Scalar operation, reg+reg.
538 def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
539 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
540 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
541 let isCommutable = Commutable;
544 // Scalar operation, reg+mem.
545 def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
546 (ins FR32:$src1, f32mem:$src2),
547 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
548 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;
550 // Vector operation, reg+reg.
551 def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst),
552 (ins VR128:$src1, VR128:$src2),
553 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
554 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
555 let isCommutable = Commutable;
558 // Vector operation, reg+mem.
559 def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst),
560 (ins VR128:$src1, f128mem:$src2),
561 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
562 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;
564 // Intrinsic operation, reg+reg.
565 def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
566 (ins VR128:$src1, VR128:$src2),
567 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
568 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
569 let isCommutable = Commutable;
572 // Intrinsic operation, reg+mem.
573 def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
574 (ins VR128:$src1, ssmem:$src2),
575 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
576 [(set VR128:$dst, (F32Int VR128:$src1,
577 sse_load_f32:$src2))]>;
578 }
579 }
581 // Arithmetic instructions
582 defm ADD : basic_sse1_fp_binop_rm<0x58, "add", fadd, int_x86_sse_add_ss, 1>;
583 defm MUL : basic_sse1_fp_binop_rm<0x59, "mul", fmul, int_x86_sse_mul_ss, 1>;
584 defm SUB : basic_sse1_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse_sub_ss>;
585 defm DIV : basic_sse1_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse_div_ss>;
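// Illustrative expansion (names are simply the defm name concatenated with the
// def names inside basic_sse1_fp_binop_rm): defm ADD produces ADDSSrr/ADDSSrm,
// ADDPSrr/ADDPSrm and ADDSSrr_Int/ADDSSrm_Int - the six "instructions"
// described in the comment above the multiclass.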
587 /// sse1_fp_binop_rm - Other SSE1 binops
589 /// This multiclass is like basic_sse1_fp_binop_rm, with the addition of
590 /// instructions for a full-vector intrinsic form. Operations that map
591 /// onto C operators don't use this form since they just use the plain
592 /// vector form instead of having a separate vector intrinsic form.
594 /// This provides a total of eight "instructions".
596 let Constraints = "$src1 = $dst" in {
597 multiclass sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
598 SDNode OpNode,
599 Intrinsic F32Int,
600 Intrinsic V4F32Int,
601 bit Commutable = 0> {
603 // Scalar operation, reg+reg.
604 def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
605 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
606 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
607 let isCommutable = Commutable;
610 // Scalar operation, reg+mem.
611 def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
612 (ins FR32:$src1, f32mem:$src2),
613 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
614 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;
616 // Vector operation, reg+reg.
617 def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst),
618 (ins VR128:$src1, VR128:$src2),
619 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
620 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
621 let isCommutable = Commutable;
624 // Vector operation, reg+mem.
625 def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst),
626 (ins VR128:$src1, f128mem:$src2),
627 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
628 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;
630 // Intrinsic operation, reg+reg.
631 def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
632 (ins VR128:$src1, VR128:$src2),
633 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
634 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
635 let isCommutable = Commutable;
638 // Intrinsic operation, reg+mem.
639 def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
640 (ins VR128:$src1, ssmem:$src2),
641 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
642 [(set VR128:$dst, (F32Int VR128:$src1,
643 sse_load_f32:$src2))]>;
645 // Vector intrinsic operation, reg+reg.
646 def PSrr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst),
647 (ins VR128:$src1, VR128:$src2),
648 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
649 [(set VR128:$dst, (V4F32Int VR128:$src1, VR128:$src2))]> {
650 let isCommutable = Commutable;
653 // Vector intrinsic operation, reg+mem.
654 def PSrm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst),
655 (ins VR128:$src1, f128mem:$src2),
656 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
657 [(set VR128:$dst, (V4F32Int VR128:$src1, (memopv4f32 addr:$src2)))]>;
658 }
659 }
661 defm MAX : sse1_fp_binop_rm<0x5F, "max", X86fmax,
662 int_x86_sse_max_ss, int_x86_sse_max_ps>;
663 defm MIN : sse1_fp_binop_rm<0x5D, "min", X86fmin,
664 int_x86_sse_min_ss, int_x86_sse_min_ps>;
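// Illustrative expansion: defm MAX produces MAXSSrr/MAXSSrm, MAXPSrr/MAXPSrm,
// MAXSSrr_Int/MAXSSrm_Int and MAXPSrr_Int/MAXPSrm_Int - the eight
// "instructions" described above sse1_fp_binop_rm.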
666 //===----------------------------------------------------------------------===//
667 // SSE packed FP Instructions
668 //===----------------------------------------------------------------------===//
670 let neverHasSideEffects = 1 in
671 def MOVAPSrr : PSI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
672 "movaps\t{$src, $dst|$dst, $src}", []>;
673 let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
674 def MOVAPSrm : PSI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
675 "movaps\t{$src, $dst|$dst, $src}",
676 [(set VR128:$dst, (alignedloadv4f32 addr:$src))]>;
678 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
679 "movaps\t{$src, $dst|$dst, $src}",
680 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
682 let neverHasSideEffects = 1 in
683 def MOVUPSrr : PSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
684 "movups\t{$src, $dst|$dst, $src}", []>;
685 let isSimpleLoad = 1 in
686 def MOVUPSrm : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
687 "movups\t{$src, $dst|$dst, $src}",
688 [(set VR128:$dst, (loadv4f32 addr:$src))]>;
689 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
690 "movups\t{$src, $dst|$dst, $src}",
691 [(store (v4f32 VR128:$src), addr:$dst)]>;
693 // Intrinsic forms of MOVUPS load and store
694 let isSimpleLoad = 1 in
695 def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
696 "movups\t{$src, $dst|$dst, $src}",
697 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
698 def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
699 "movups\t{$src, $dst|$dst, $src}",
700 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
702 let Constraints = "$src1 = $dst" in {
703 let AddedComplexity = 20 in {
704 def MOVLPSrm : PSI<0x12, MRMSrcMem,
705 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
706 "movlps\t{$src2, $dst|$dst, $src2}",
708 (v4f32 (vector_shuffle VR128:$src1,
709 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
710 MOVLP_shuffle_mask)))]>;
711 def MOVHPSrm : PSI<0x16, MRMSrcMem,
712 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
713 "movhps\t{$src2, $dst|$dst, $src2}",
715 (v4f32 (vector_shuffle VR128:$src1,
716 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
717 MOVHP_shuffle_mask)))]>;
718 } // AddedComplexity
719 } // Constraints = "$src1 = $dst"
722 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
723 "movlps\t{$src, $dst|$dst, $src}",
724 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
725 (iPTR 0))), addr:$dst)]>;
727 // v2f64 extract element 1 is always custom lowered to unpack high to low
728 // and extract element 0 so the non-store version isn't too horrible.
729 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
730 "movhps\t{$src, $dst|$dst, $src}",
731 [(store (f64 (vector_extract
732 (v2f64 (vector_shuffle
733 (bc_v2f64 (v4f32 VR128:$src)), (undef),
734 UNPCKH_shuffle_mask)), (iPTR 0))),
735 addr:$dst)]>;
737 let Constraints = "$src1 = $dst" in {
738 let AddedComplexity = 15 in {
739 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
740 "movlhps\t{$src2, $dst|$dst, $src2}",
742 (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
743 MOVHP_shuffle_mask)))]>;
745 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
746 "movhlps\t{$src2, $dst|$dst, $src2}",
748 (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
749 MOVHLPS_shuffle_mask)))]>;
750 } // AddedComplexity
751 } // Constraints = "$src1 = $dst"
757 /// sse1_fp_unop_rm - SSE1 unops come in both scalar and vector forms.
759 /// In addition, we also have a special variant of the scalar form here to
760 /// represent the associated intrinsic operation. This form is unlike the
761 /// plain scalar form, in that it takes an entire vector (instead of a
762 /// scalar) and leaves the top elements undefined.
764 /// And, we have a special variant form for a full-vector intrinsic form.
766 /// These four forms can each have a reg or a mem operand, so there are a
767 /// total of eight "instructions".
769 multiclass sse1_fp_unop_rm<bits<8> opc, string OpcodeStr,
770 SDNode OpNode,
771 Intrinsic F32Int,
772 Intrinsic V4F32Int,
773 bit Commutable = 0> {
774 // Scalar operation, reg.
775 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
776 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
777 [(set FR32:$dst, (OpNode FR32:$src))]> {
778 let isCommutable = Commutable;
781 // Scalar operation, mem.
782 def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
783 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
784 [(set FR32:$dst, (OpNode (load addr:$src)))]>;
786 // Vector operation, reg.
787 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
788 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
789 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]> {
790 let isCommutable = Commutable;
793 // Vector operation, mem.
794 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
795 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
796 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
798 // Intrinsic operation, reg.
799 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
800 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
801 [(set VR128:$dst, (F32Int VR128:$src))]> {
802 let isCommutable = Commutable;
805 // Intrinsic operation, mem.
806 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
807 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
808 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
810 // Vector intrinsic operation, reg
811 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
812 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
813 [(set VR128:$dst, (V4F32Int VR128:$src))]> {
814 let isCommutable = Commutable;
817 // Vector intrinsic operation, mem
818 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
819 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
820 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
821 }
824 defm SQRT : sse1_fp_unop_rm<0x51, "sqrt", fsqrt,
825 int_x86_sse_sqrt_ss, int_x86_sse_sqrt_ps>;
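// Illustrative expansion: defm SQRT produces SQRTSSr/SQRTSSm, SQRTPSr/SQRTPSm,
// SQRTSSr_Int/SQRTSSm_Int and SQRTPSr_Int/SQRTPSm_Int.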
827 // Reciprocal approximations. Note that these typically require refinement
828 // in order to obtain suitable precision.
829 defm RSQRT : sse1_fp_unop_rm<0x52, "rsqrt", X86frsqrt,
830 int_x86_sse_rsqrt_ss, int_x86_sse_rsqrt_ps>;
831 defm RCP : sse1_fp_unop_rm<0x53, "rcp", X86frcp,
832 int_x86_sse_rcp_ss, int_x86_sse_rcp_ps>;
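// For reference (not encoded here): one Newton-Raphson step is the usual
// refinement, e.g. rsqrt: x1 = x0*(1.5 - 0.5*a*x0*x0) and rcp: x1 = x0*(2 - a*x0),
// roughly doubling the ~12 bits of precision of the hardware estimate.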
835 let Constraints = "$src1 = $dst" in {
836 let isCommutable = 1 in {
837 def ANDPSrr : PSI<0x54, MRMSrcReg,
838 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
839 "andps\t{$src2, $dst|$dst, $src2}",
840 [(set VR128:$dst, (v2i64
841 (and VR128:$src1, VR128:$src2)))]>;
842 def ORPSrr : PSI<0x56, MRMSrcReg,
843 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
844 "orps\t{$src2, $dst|$dst, $src2}",
845 [(set VR128:$dst, (v2i64
846 (or VR128:$src1, VR128:$src2)))]>;
847 def XORPSrr : PSI<0x57, MRMSrcReg,
848 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
849 "xorps\t{$src2, $dst|$dst, $src2}",
850 [(set VR128:$dst, (v2i64
851 (xor VR128:$src1, VR128:$src2)))]>;
852 }
854 def ANDPSrm : PSI<0x54, MRMSrcMem,
855 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
856 "andps\t{$src2, $dst|$dst, $src2}",
857 [(set VR128:$dst, (and (bc_v2i64 (v4f32 VR128:$src1)),
858 (memopv2i64 addr:$src2)))]>;
859 def ORPSrm : PSI<0x56, MRMSrcMem,
860 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
861 "orps\t{$src2, $dst|$dst, $src2}",
862 [(set VR128:$dst, (or (bc_v2i64 (v4f32 VR128:$src1)),
863 (memopv2i64 addr:$src2)))]>;
864 def XORPSrm : PSI<0x57, MRMSrcMem,
865 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
866 "xorps\t{$src2, $dst|$dst, $src2}",
867 [(set VR128:$dst, (xor (bc_v2i64 (v4f32 VR128:$src1)),
868 (memopv2i64 addr:$src2)))]>;
869 def ANDNPSrr : PSI<0x55, MRMSrcReg,
870 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
871 "andnps\t{$src2, $dst|$dst, $src2}",
873 (v2i64 (and (xor VR128:$src1,
874 (bc_v2i64 (v4i32 immAllOnesV))),
875 VR128:$src2)))]>;
876 def ANDNPSrm : PSI<0x55, MRMSrcMem,
877 (outs VR128:$dst), (ins VR128:$src1,f128mem:$src2),
878 "andnps\t{$src2, $dst|$dst, $src2}",
880 (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
881 (bc_v2i64 (v4i32 immAllOnesV))),
882 (memopv2i64 addr:$src2))))]>;
883 }
885 let Constraints = "$src1 = $dst" in {
886 def CMPPSrri : PSIi8<0xC2, MRMSrcReg,
887 (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
888 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
889 [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
890 VR128:$src, imm:$cc))]>;
891 def CMPPSrmi : PSIi8<0xC2, MRMSrcMem,
892 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
893 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
894 [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
895 (memop addr:$src), imm:$cc))]>;
896 }
897 def : Pat<(v4i32 (vsetcc (v4f32 VR128:$src1), VR128:$src2, cond:$cc)),
898 (CMPPSrri VR128:$src1, VR128:$src2, (SSE_CC_imm cond:$cc))>;
899 def : Pat<(v4i32 (vsetcc (v4f32 VR128:$src1), (memop addr:$src2), cond:$cc)),
900 (CMPPSrmi VR128:$src1, addr:$src2, (SSE_CC_imm cond:$cc))>;
902 // Shuffle and unpack instructions
903 let Constraints = "$src1 = $dst" in {
904 let isConvertibleToThreeAddress = 1 in // Convert to pshufd
905 def SHUFPSrri : PSIi8<0xC6, MRMSrcReg,
906 (outs VR128:$dst), (ins VR128:$src1,
907 VR128:$src2, i32i8imm:$src3),
908 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
910 (v4f32 (vector_shuffle
911 VR128:$src1, VR128:$src2,
912 SHUFP_shuffle_mask:$src3)))]>;
913 def SHUFPSrmi : PSIi8<0xC6, MRMSrcMem,
914 (outs VR128:$dst), (ins VR128:$src1,
915 f128mem:$src2, i32i8imm:$src3),
916 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
918 (v4f32 (vector_shuffle
919 VR128:$src1, (memopv4f32 addr:$src2),
920 SHUFP_shuffle_mask:$src3)))]>;
922 let AddedComplexity = 10 in {
923 def UNPCKHPSrr : PSI<0x15, MRMSrcReg,
924 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
925 "unpckhps\t{$src2, $dst|$dst, $src2}",
927 (v4f32 (vector_shuffle
928 VR128:$src1, VR128:$src2,
929 UNPCKH_shuffle_mask)))]>;
930 def UNPCKHPSrm : PSI<0x15, MRMSrcMem,
931 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
932 "unpckhps\t{$src2, $dst|$dst, $src2}",
934 (v4f32 (vector_shuffle
935 VR128:$src1, (memopv4f32 addr:$src2),
936 UNPCKH_shuffle_mask)))]>;
938 def UNPCKLPSrr : PSI<0x14, MRMSrcReg,
939 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
940 "unpcklps\t{$src2, $dst|$dst, $src2}",
942 (v4f32 (vector_shuffle
943 VR128:$src1, VR128:$src2,
944 UNPCKL_shuffle_mask)))]>;
945 def UNPCKLPSrm : PSI<0x14, MRMSrcMem,
946 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
947 "unpcklps\t{$src2, $dst|$dst, $src2}",
949 (v4f32 (vector_shuffle
950 VR128:$src1, (memopv4f32 addr:$src2),
951 UNPCKL_shuffle_mask)))]>;
952 } // AddedComplexity
953 } // Constraints = "$src1 = $dst"
956 def MOVMSKPSrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
957 "movmskps\t{$src, $dst|$dst, $src}",
958 [(set GR32:$dst, (int_x86_sse_movmsk_ps VR128:$src))]>;
959 def MOVMSKPDrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
960 "movmskpd\t{$src, $dst|$dst, $src}",
961 [(set GR32:$dst, (int_x86_sse2_movmsk_pd VR128:$src))]>;
963 // Prefetch intrinsic.
964 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
965 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
966 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
967 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
968 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
969 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
970 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
971 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
973 // Non-temporal stores
974 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
975 "movntps\t{$src, $dst|$dst, $src}",
976 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
978 // Load, store, and memory fence
979 def SFENCE : PSI<0xAE, MRM7m, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>;
982 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
983 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
984 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
985 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
987 // Alias instructions that map zero vector to pxor / xorp* for sse.
988 let isReMaterializable = 1 in
989 def V_SET0 : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins),
990 "xorps\t$dst, $dst",
991 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
993 let Predicates = [HasSSE1] in {
994 def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
995 def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
996 def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
997 def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
998 def : Pat<(v4f32 immAllZerosV), (V_SET0)>;
999 }
1001 // FR32 to 128-bit vector conversion.
1002 def MOVSS2PSrr : SSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR32:$src),
1003 "movss\t{$src, $dst|$dst, $src}",
1004 [(set VR128:$dst,
1005 (v4f32 (scalar_to_vector FR32:$src)))]>;
1006 def MOVSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
1007 "movss\t{$src, $dst|$dst, $src}",
1008 [(set VR128:$dst,
1009 (v4f32 (scalar_to_vector (loadf32 addr:$src))))]>;
1011 // FIXME: may not be able to eliminate this movss with coalescing since the
1012 // src and dest register classes are different. We really want to write this
1013 // pattern like this:
1014 // def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
1015 // (f32 FR32:$src)>;
1016 def MOVPS2SSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins VR128:$src),
1017 "movss\t{$src, $dst|$dst, $src}",
1018 [(set FR32:$dst, (vector_extract (v4f32 VR128:$src),
1019 (iPTR 0)))]>;
1020 def MOVPS2SSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
1021 "movss\t{$src, $dst|$dst, $src}",
1022 [(store (f32 (vector_extract (v4f32 VR128:$src),
1023 (iPTR 0))), addr:$dst)]>;
1026 // Move to lower bits of a VR128, leaving upper bits alone.
1027 // Three operand (but two address) aliases.
1028 let Constraints = "$src1 = $dst" in {
1029 let neverHasSideEffects = 1 in
1030 def MOVLSS2PSrr : SSI<0x10, MRMSrcReg,
1031 (outs VR128:$dst), (ins VR128:$src1, FR32:$src2),
1032 "movss\t{$src2, $dst|$dst, $src2}", []>;
1034 let AddedComplexity = 15 in
1035 def MOVLPSrr : SSI<0x10, MRMSrcReg,
1036 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1037 "movss\t{$src2, $dst|$dst, $src2}",
1039 (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
1040 MOVL_shuffle_mask)))]>;
1041 }
1043 // Move to the lower bits of a VR128, zeroing the upper bits.
1044 // Loading from memory automatically zeroes the upper bits.
1045 let AddedComplexity = 20 in
1046 def MOVZSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
1047 "movss\t{$src, $dst|$dst, $src}",
1048 [(set VR128:$dst, (v4f32 (X86vzmovl (v4f32 (scalar_to_vector
1049 (loadf32 addr:$src))))))]>;
1051 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
1052 (MOVZSS2PSrm addr:$src)>;
1054 //===----------------------------------------------------------------------===//
1055 // SSE2 Instructions
1056 //===----------------------------------------------------------------------===//
1058 // Move Instructions
1059 let neverHasSideEffects = 1 in
1060 def MOVSDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1061 "movsd\t{$src, $dst|$dst, $src}", []>;
1062 let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
1063 def MOVSDrm : SDI<0x10, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1064 "movsd\t{$src, $dst|$dst, $src}",
1065 [(set FR64:$dst, (loadf64 addr:$src))]>;
1066 def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
1067 "movsd\t{$src, $dst|$dst, $src}",
1068 [(store FR64:$src, addr:$dst)]>;
1070 // Conversion instructions
1071 def CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src),
1072 "cvttsd2si\t{$src, $dst|$dst, $src}",
1073 [(set GR32:$dst, (fp_to_sint FR64:$src))]>;
1074 def CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f64mem:$src),
1075 "cvttsd2si\t{$src, $dst|$dst, $src}",
1076 [(set GR32:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
1077 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
1078 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1079 [(set FR32:$dst, (fround FR64:$src))]>;
1080 def CVTSD2SSrm : SDI<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
1081 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1082 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>;
1083 def CVTSI2SDrr : SDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR32:$src),
1084 "cvtsi2sd\t{$src, $dst|$dst, $src}",
1085 [(set FR64:$dst, (sint_to_fp GR32:$src))]>;
1086 def CVTSI2SDrm : SDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i32mem:$src),
1087 "cvtsi2sd\t{$src, $dst|$dst, $src}",
1088 [(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
1090 // SSE2 instructions with XS prefix
1091 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
1092 "cvtss2sd\t{$src, $dst|$dst, $src}",
1093 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
1094 Requires<[HasSSE2]>;
1095 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
1096 "cvtss2sd\t{$src, $dst|$dst, $src}",
1097 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
1098 Requires<[HasSSE2]>;
1100 // Match intrinsics which expect XMM operand(s).
1101 def Int_CVTSD2SIrr : SDI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
1102 "cvtsd2si\t{$src, $dst|$dst, $src}",
1103 [(set GR32:$dst, (int_x86_sse2_cvtsd2si VR128:$src))]>;
1104 def Int_CVTSD2SIrm : SDI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
1105 "cvtsd2si\t{$src, $dst|$dst, $src}",
1106 [(set GR32:$dst, (int_x86_sse2_cvtsd2si
1107 (load addr:$src)))]>;
1109 // Match intrinsics which expect MM and XMM operand(s).
1110 def Int_CVTPD2PIrr : PDI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
1111 "cvtpd2pi\t{$src, $dst|$dst, $src}",
1112 [(set VR64:$dst, (int_x86_sse_cvtpd2pi VR128:$src))]>;
1113 def Int_CVTPD2PIrm : PDI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
1114 "cvtpd2pi\t{$src, $dst|$dst, $src}",
1115 [(set VR64:$dst, (int_x86_sse_cvtpd2pi
1116 (memop addr:$src)))]>;
1117 def Int_CVTTPD2PIrr: PDI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
1118 "cvttpd2pi\t{$src, $dst|$dst, $src}",
1119 [(set VR64:$dst, (int_x86_sse_cvttpd2pi VR128:$src))]>;
1120 def Int_CVTTPD2PIrm: PDI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
1121 "cvttpd2pi\t{$src, $dst|$dst, $src}",
1122 [(set VR64:$dst, (int_x86_sse_cvttpd2pi
1123 (memop addr:$src)))]>;
1124 def Int_CVTPI2PDrr : PDI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
1125 "cvtpi2pd\t{$src, $dst|$dst, $src}",
1126 [(set VR128:$dst, (int_x86_sse_cvtpi2pd VR64:$src))]>;
1127 def Int_CVTPI2PDrm : PDI<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1128 "cvtpi2pd\t{$src, $dst|$dst, $src}",
1129 [(set VR128:$dst, (int_x86_sse_cvtpi2pd
1130 (load addr:$src)))]>;
1132 // Aliases for intrinsics
1133 def Int_CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
1134 "cvttsd2si\t{$src, $dst|$dst, $src}",
1135 [(set GR32:$dst,
1136 (int_x86_sse2_cvttsd2si VR128:$src))]>;
1137 def Int_CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
1138 "cvttsd2si\t{$src, $dst|$dst, $src}",
1139 [(set GR32:$dst, (int_x86_sse2_cvttsd2si
1140 (load addr:$src)))]>;
1142 // Comparison instructions
1143 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1144 def CMPSDrr : SDIi8<0xC2, MRMSrcReg,
1145 (outs FR64:$dst), (ins FR64:$src1, FR64:$src, SSECC:$cc),
1146 "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
1148 def CMPSDrm : SDIi8<0xC2, MRMSrcMem,
1149 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, SSECC:$cc),
1150 "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
1153 let Defs = [EFLAGS] in {
1154 def UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2),
1155 "ucomisd\t{$src2, $src1|$src1, $src2}",
1156 [(X86cmp FR64:$src1, FR64:$src2), (implicit EFLAGS)]>;
1157 def UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2),
1158 "ucomisd\t{$src2, $src1|$src1, $src2}",
1159 [(X86cmp FR64:$src1, (loadf64 addr:$src2)),
1160 (implicit EFLAGS)]>;
1161 }
1163 // Aliases to match intrinsics which expect XMM operand(s).
1164 let Constraints = "$src1 = $dst" in {
1165 def Int_CMPSDrr : SDIi8<0xC2, MRMSrcReg,
1166 (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
1167 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
1168 [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
1169 VR128:$src, imm:$cc))]>;
1170 def Int_CMPSDrm : SDIi8<0xC2, MRMSrcMem,
1171 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src, SSECC:$cc),
1172 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
1173 [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
1174 (load addr:$src), imm:$cc))]>;
1175 }
1177 let Defs = [EFLAGS] in {
1178 def Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
1179 "ucomisd\t{$src2, $src1|$src1, $src2}",
1180 [(X86ucomi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
1181 (implicit EFLAGS)]>;
1182 def Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs),(ins VR128:$src1, f128mem:$src2),
1183 "ucomisd\t{$src2, $src1|$src1, $src2}",
1184 [(X86ucomi (v2f64 VR128:$src1), (load addr:$src2)),
1185 (implicit EFLAGS)]>;
1187 def Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
1188 "comisd\t{$src2, $src1|$src1, $src2}",
1189 [(X86comi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
1190 (implicit EFLAGS)]>;
1191 def Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
1192 "comisd\t{$src2, $src1|$src1, $src2}",
1193 [(X86comi (v2f64 VR128:$src1), (load addr:$src2)),
1194 (implicit EFLAGS)]>;
1195 }
1197 // Aliases of packed SSE2 instructions for scalar use. These all have names that
1198 // start with 'Fs'.
1200 // Alias instructions that map fld0 to pxor for sse.
1201 let isReMaterializable = 1 in
1202 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins),
1203 "pxor\t$dst, $dst", [(set FR64:$dst, fpimm0)]>,
1204 Requires<[HasSSE2]>, TB, OpSize;
1206 // Alias instruction to do FR64 reg-to-reg copy using movapd. Upper bits are
1207 // disregarded since they're always zero.
1208 let neverHasSideEffects = 1 in
1209 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1210 "movapd\t{$src, $dst|$dst, $src}", []>;
1212 // Alias instruction to load FR64 from f128mem using movapd. Upper bits are
1213 // disregarded since they're always zero.
1214 let isSimpleLoad = 1 in
1215 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1216 "movapd\t{$src, $dst|$dst, $src}",
1217 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1219 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1220 let Constraints = "$src1 = $dst" in {
1221 let isCommutable = 1 in {
1222 def FsANDPDrr : PDI<0x54, MRMSrcReg, (outs FR64:$dst),
1223 (ins FR64:$src1, FR64:$src2),
1224 "andpd\t{$src2, $dst|$dst, $src2}",
1225 [(set FR64:$dst, (X86fand FR64:$src1, FR64:$src2))]>;
1226 def FsORPDrr : PDI<0x56, MRMSrcReg, (outs FR64:$dst),
1227 (ins FR64:$src1, FR64:$src2),
1228 "orpd\t{$src2, $dst|$dst, $src2}",
1229 [(set FR64:$dst, (X86for FR64:$src1, FR64:$src2))]>;
1230 def FsXORPDrr : PDI<0x57, MRMSrcReg, (outs FR64:$dst),
1231 (ins FR64:$src1, FR64:$src2),
1232 "xorpd\t{$src2, $dst|$dst, $src2}",
1233 [(set FR64:$dst, (X86fxor FR64:$src1, FR64:$src2))]>;
1234 }
1236 def FsANDPDrm : PDI<0x54, MRMSrcMem, (outs FR64:$dst),
1237 (ins FR64:$src1, f128mem:$src2),
1238 "andpd\t{$src2, $dst|$dst, $src2}",
1239 [(set FR64:$dst, (X86fand FR64:$src1,
1240 (memopfsf64 addr:$src2)))]>;
1241 def FsORPDrm : PDI<0x56, MRMSrcMem, (outs FR64:$dst),
1242 (ins FR64:$src1, f128mem:$src2),
1243 "orpd\t{$src2, $dst|$dst, $src2}",
1244 [(set FR64:$dst, (X86for FR64:$src1,
1245 (memopfsf64 addr:$src2)))]>;
1246 def FsXORPDrm : PDI<0x57, MRMSrcMem, (outs FR64:$dst),
1247 (ins FR64:$src1, f128mem:$src2),
1248 "xorpd\t{$src2, $dst|$dst, $src2}",
1249 [(set FR64:$dst, (X86fxor FR64:$src1,
1250 (memopfsf64 addr:$src2)))]>;
1252 let neverHasSideEffects = 1 in {
1253 def FsANDNPDrr : PDI<0x55, MRMSrcReg,
1254 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1255 "andnpd\t{$src2, $dst|$dst, $src2}", []>;
1257 def FsANDNPDrm : PDI<0x55, MRMSrcMem,
1258 (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2),
1259 "andnpd\t{$src2, $dst|$dst, $src2}", []>;
1263 /// basic_sse2_fp_binop_rm - SSE2 binops come in both scalar and vector forms.
1265 /// In addition, we also have a special variant of the scalar form here to
1266 /// represent the associated intrinsic operation. This form is unlike the
1267 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1268 /// and leaves the top elements undefined.
1270 /// These three forms can each be reg+reg or reg+mem, so there are a total of
1271 /// six "instructions".
1273 let Constraints = "$src1 = $dst" in {
1274 multiclass basic_sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
1275 SDNode OpNode, Intrinsic F64Int,
1276 bit Commutable = 0> {
1277 // Scalar operation, reg+reg.
1278 def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1279 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1280 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
1281 let isCommutable = Commutable;
1284 // Scalar operation, reg+mem.
1285 def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2),
1286 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1287 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
1289 // Vector operation, reg+reg.
1290 def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1291 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
1292 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
1293 let isCommutable = Commutable;
1296 // Vector operation, reg+mem.
1297 def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
1298 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
1299 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;
1301 // Intrinsic operation, reg+reg.
1302 def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1303 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1304 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
1305 let isCommutable = Commutable;
1308 // Intrinsic operation, reg+mem.
1309 def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1310 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1311 [(set VR128:$dst, (F64Int VR128:$src1,
1312 sse_load_f64:$src2))]>;
1313 }
1314 }
1316 // Arithmetic instructions
1317 defm ADD : basic_sse2_fp_binop_rm<0x58, "add", fadd, int_x86_sse2_add_sd, 1>;
1318 defm MUL : basic_sse2_fp_binop_rm<0x59, "mul", fmul, int_x86_sse2_mul_sd, 1>;
1319 defm SUB : basic_sse2_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse2_sub_sd>;
1320 defm DIV : basic_sse2_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse2_div_sd>;
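// Illustrative expansion (same naming scheme as the SSE1 defms above): this
// defm ADD adds ADDSDrr/ADDSDrm, ADDPDrr/ADDPDrm and ADDSDrr_Int/ADDSDrm_Int.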
1322 /// sse2_fp_binop_rm - Other SSE2 binops
1324 /// This multiclass is like basic_sse2_fp_binop_rm, with the addition of
1325 /// instructions for a full-vector intrinsic form. Operations that map
1326 /// onto C operators don't use this form since they just use the plain
1327 /// vector form instead of having a separate vector intrinsic form.
1329 /// This provides a total of eight "instructions".
1331 let Constraints = "$src1 = $dst" in {
1332 multiclass sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
1333 SDNode OpNode,
1334 Intrinsic F64Int,
1335 Intrinsic V2F64Int,
1336 bit Commutable = 0> {
1338 // Scalar operation, reg+reg.
1339 def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1340 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1341 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
1342 let isCommutable = Commutable;
1345 // Scalar operation, reg+mem.
1346 def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
1347 (ins FR64:$src1, f64mem:$src2),
1348 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1349 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
1351 // Vector operation, reg+reg.
1352 def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
1353 (ins VR128:$src1, VR128:$src2),
1354 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
1355 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
1356 let isCommutable = Commutable;
1359 // Vector operation, reg+mem.
1360 def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
1361 (ins VR128:$src1, f128mem:$src2),
1362 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
1363 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;
1365 // Intrinsic operation, reg+reg.
1366 def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst),
1367 (ins VR128:$src1, VR128:$src2),
1368 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1369 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
1370 let isCommutable = Commutable;
1373 // Intrinsic operation, reg+mem.
1374 def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
1375 (ins VR128:$src1, sdmem:$src2),
1376 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1377 [(set VR128:$dst, (F64Int VR128:$src1,
1378 sse_load_f64:$src2))]>;
1380 // Vector intrinsic operation, reg+reg.
1381 def PDrr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst),
1382 (ins VR128:$src1, VR128:$src2),
1383 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
1384 [(set VR128:$dst, (V2F64Int VR128:$src1, VR128:$src2))]> {
1385 let isCommutable = Commutable;
1388 // Vector intrinsic operation, reg+mem.
1389 def PDrm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst),
1390 (ins VR128:$src1, f128mem:$src2),
1391 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
1392 [(set VR128:$dst, (V2F64Int VR128:$src1,
1393 (memopv2f64 addr:$src2)))]>;
1394 }
1395 }
1397 defm MAX : sse2_fp_binop_rm<0x5F, "max", X86fmax,
1398 int_x86_sse2_max_sd, int_x86_sse2_max_pd>;
1399 defm MIN : sse2_fp_binop_rm<0x5D, "min", X86fmin,
1400 int_x86_sse2_min_sd, int_x86_sse2_min_pd>;
1402 //===----------------------------------------------------------------------===//
1403 // SSE packed FP Instructions
1404 //===----------------------------------------------------------------------===//
1405 // Move Instructions
1406 let neverHasSideEffects = 1 in
1407 def MOVAPDrr : PDI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1408 "movapd\t{$src, $dst|$dst, $src}", []>;
1409 let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
1410 def MOVAPDrm : PDI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1411 "movapd\t{$src, $dst|$dst, $src}",
1412 [(set VR128:$dst, (alignedloadv2f64 addr:$src))]>;
1414 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
1415 "movapd\t{$src, $dst|$dst, $src}",
1416 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
1418 let neverHasSideEffects = 1 in
1419 def MOVUPDrr : PDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1420 "movupd\t{$src, $dst|$dst, $src}", []>;
1421 let isSimpleLoad = 1 in
1422 def MOVUPDrm : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1423 "movupd\t{$src, $dst|$dst, $src}",
1424 [(set VR128:$dst, (loadv2f64 addr:$src))]>;
1425 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
1426 "movupd\t{$src, $dst|$dst, $src}",
1427 [(store (v2f64 VR128:$src), addr:$dst)]>;
1429 // Intrinsic forms of MOVUPD load and store
1430 def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1431 "movupd\t{$src, $dst|$dst, $src}",
1432 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
1433 def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
1434 "movupd\t{$src, $dst|$dst, $src}",
1435 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
1437 let Constraints = "$src1 = $dst" in {
1438 let AddedComplexity = 20 in {
1439 def MOVLPDrm : PDI<0x12, MRMSrcMem,
1440 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
1441 "movlpd\t{$src2, $dst|$dst, $src2}",
1443 (v2f64 (vector_shuffle VR128:$src1,
1444 (scalar_to_vector (loadf64 addr:$src2)),
1445 MOVLP_shuffle_mask)))]>;
1446 def MOVHPDrm : PDI<0x16, MRMSrcMem,
1447 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
1448 "movhpd\t{$src2, $dst|$dst, $src2}",
1450 (v2f64 (vector_shuffle VR128:$src1,
1451 (scalar_to_vector (loadf64 addr:$src2)),
1452 MOVHP_shuffle_mask)))]>;
1453 } // AddedComplexity
1454 } // Constraints = "$src1 = $dst"
1456 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1457 "movlpd\t{$src, $dst|$dst, $src}",
1458 [(store (f64 (vector_extract (v2f64 VR128:$src),
1459 (iPTR 0))), addr:$dst)]>;
1461 // v2f64 extract element 1 is always custom lowered to unpack high to low
1462 // and extract element 0 so the non-store version isn't too horrible.
1463 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1464 "movhpd\t{$src, $dst|$dst, $src}",
1465 [(store (f64 (vector_extract
1466 (v2f64 (vector_shuffle VR128:$src, (undef),
1467 UNPCKH_shuffle_mask)), (iPTR 0))),
1468 addr:$dst)]>;
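// Editor's sketch of the lowering described above: the non-store form
//   (f64 (vector_extract (v2f64 V), 1))
// is custom lowered to roughly
//   (f64 (vector_extract (unpckhpd V, V), 0))
// so only the store form needs its own pattern here.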
1470 // SSE2 instructions without OpSize prefix
1471 def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1472 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1473 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
1474 TB, Requires<[HasSSE2]>;
1475 def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1476 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1477 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
1478 (bitconvert (memopv2i64 addr:$src))))]>,
1479 TB, Requires<[HasSSE2]>;
1481 // SSE2 instructions with XS prefix
1482 def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1483 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1484 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
1485 XS, Requires<[HasSSE2]>;
1486 def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1487 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1488 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
1489 (bitconvert (memopv2i64 addr:$src))))]>,
1490 XS, Requires<[HasSSE2]>;
1492 def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1493 "cvtps2dq\t{$src, $dst|$dst, $src}",
1494 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
1495 def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1496 "cvtps2dq\t{$src, $dst|$dst, $src}",
1497 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
1498 (memop addr:$src)))]>;
1499 // SSE2 packed instructions with XS prefix
1500 def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1501 "cvttps2dq\t{$src, $dst|$dst, $src}",
1502 [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))]>,
1503 XS, Requires<[HasSSE2]>;
1504 def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1505 "cvttps2dq\t{$src, $dst|$dst, $src}",
1506 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
1507 (memop addr:$src)))]>,
1508 XS, Requires<[HasSSE2]>;
1510 // SSE2 packed instructions with XD prefix
1511 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1512 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1513 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1514 XD, Requires<[HasSSE2]>;
1515 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1516 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1517 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
1518 (memop addr:$src)))]>,
1519 XD, Requires<[HasSSE2]>;
1521 def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1522 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1523 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
1524 def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
1525 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1526 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1527 (memop addr:$src)))]>;
1529 // SSE2 instructions without OpSize prefix
1530 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1531 "cvtps2pd\t{$src, $dst|$dst, $src}",
1532 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1533 TB, Requires<[HasSSE2]>;
1534 def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1535 "cvtps2pd\t{$src, $dst|$dst, $src}",
1536 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1537 (load addr:$src)))]>,
1538 TB, Requires<[HasSSE2]>;
1540 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1541 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1542 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1543 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1544 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1545 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1546 (memop addr:$src)))]>;
1548 // Match intrinsics which expect XMM operand(s).
1549 // Aliases for intrinsics
1550 let Constraints = "$src1 = $dst" in {
1551 def Int_CVTSI2SDrr: SDI<0x2A, MRMSrcReg,
1552 (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
1553 "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
1554 [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
1555 GR32:$src2))]>;
1556 def Int_CVTSI2SDrm: SDI<0x2A, MRMSrcMem,
1557 (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
1558 "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
1559 [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
1560 (loadi32 addr:$src2)))]>;
1561 def Int_CVTSD2SSrr: SDI<0x5A, MRMSrcReg,
1562 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1563 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1564 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
1565 VR128:$src2))]>;
1566 def Int_CVTSD2SSrm: SDI<0x5A, MRMSrcMem,
1567 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
1568 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1569 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
1570 (load addr:$src2)))]>;
1571 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
1572 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1573 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1574 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1575 VR128:$src2))]>, XS,
1576 Requires<[HasSSE2]>;
1577 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
1578 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
1579 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1580 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1581 (load addr:$src2)))]>, XS,
1582 Requires<[HasSSE2]>;
1583 }
1587 /// sse2_fp_unop_rm - SSE2 unops come in both scalar and vector forms.
1589 /// In addition, we also have a special variant of the scalar form here to
1590 /// represent the associated intrinsic operation. This form is unlike the
1591 /// plain scalar form, in that it takes an entire vector (instead of a
1592 /// scalar) and leaves the top elements undefined.
1594 /// And, we have a special variant form for a full-vector intrinsic form.
1596 /// These four forms can each have a reg or a mem operand, so there are a
1597 /// total of eight "instructions".
1599 multiclass sse2_fp_unop_rm<bits<8> opc, string OpcodeStr,
1600 SDNode OpNode,
1601 Intrinsic F64Int,
1602 Intrinsic V2F64Int,
1603 bit Commutable = 0> {
1604 // Scalar operation, reg.
1605 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1606 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1607 [(set FR64:$dst, (OpNode FR64:$src))]> {
1608 let isCommutable = Commutable;
1609 }
1611 // Scalar operation, mem.
1612 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1613 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1614 [(set FR64:$dst, (OpNode (load addr:$src)))]>;
1616 // Vector operation, reg.
1617 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1618 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1619 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]> {
1620 let isCommutable = Commutable;
1621 }
1623 // Vector operation, mem.
1624 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1625 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1626 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
1628 // Intrinsic operation, reg.
1629 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1630 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1631 [(set VR128:$dst, (F64Int VR128:$src))]> {
1632 let isCommutable = Commutable;
1633 }
1635 // Intrinsic operation, mem.
1636 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1637 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1638 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1640 // Vector intrinsic operation, reg
1641 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1642 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1643 [(set VR128:$dst, (V2F64Int VR128:$src))]> {
1644 let isCommutable = Commutable;
1645 }
1647 // Vector intrinsic operation, mem
1648 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1649 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1650 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
1651 }
1654 defm SQRT : sse2_fp_unop_rm<0x51, "sqrt", fsqrt,
1655 int_x86_sse2_sqrt_sd, int_x86_sse2_sqrt_pd>;
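// For example, the defm above expands to the eight instructions described in
// the comment on sse2_fp_unop_rm: SQRTSDr, SQRTSDm, SQRTPDr and SQRTPDm for
// the plain scalar/vector forms, plus SQRTSDr_Int, SQRTSDm_Int, SQRTPDr_Int
// and SQRTPDm_Int for the intrinsic forms.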
1657 // There are no f64 versions of the reciprocal approximation instructions.
1660 let Constraints = "$src1 = $dst" in {
1661 let isCommutable = 1 in {
1662 def ANDPDrr : PDI<0x54, MRMSrcReg,
1663 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1664 "andpd\t{$src2, $dst|$dst, $src2}",
1666 (and (bc_v2i64 (v2f64 VR128:$src1)),
1667 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1668 def ORPDrr : PDI<0x56, MRMSrcReg,
1669 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1670 "orpd\t{$src2, $dst|$dst, $src2}",
1672 (or (bc_v2i64 (v2f64 VR128:$src1)),
1673 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1674 def XORPDrr : PDI<0x57, MRMSrcReg,
1675 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1676 "xorpd\t{$src2, $dst|$dst, $src2}",
1678 (xor (bc_v2i64 (v2f64 VR128:$src1)),
1679 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1680 }
1682 def ANDPDrm : PDI<0x54, MRMSrcMem,
1683 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
1684 "andpd\t{$src2, $dst|$dst, $src2}",
1686 (and (bc_v2i64 (v2f64 VR128:$src1)),
1687 (memopv2i64 addr:$src2)))]>;
1688 def ORPDrm : PDI<0x56, MRMSrcMem,
1689 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
1690 "orpd\t{$src2, $dst|$dst, $src2}",
1692 (or (bc_v2i64 (v2f64 VR128:$src1)),
1693 (memopv2i64 addr:$src2)))]>;
1694 def XORPDrm : PDI<0x57, MRMSrcMem,
1695 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
1696 "xorpd\t{$src2, $dst|$dst, $src2}",
1698 (xor (bc_v2i64 (v2f64 VR128:$src1)),
1699 (memopv2i64 addr:$src2)))]>;
1700 def ANDNPDrr : PDI<0x55, MRMSrcReg,
1701 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1702 "andnpd\t{$src2, $dst|$dst, $src2}",
1704 (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1705 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1706 def ANDNPDrm : PDI<0x55, MRMSrcMem,
1707 (outs VR128:$dst), (ins VR128:$src1,f128mem:$src2),
1708 "andnpd\t{$src2, $dst|$dst, $src2}",
1710 (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1711 (memopv2i64 addr:$src2)))]>;
1712 }
1714 let Constraints = "$src1 = $dst" in {
1715 def CMPPDrri : PDIi8<0xC2, MRMSrcReg,
1716 (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
1717 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1718 [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
1719 VR128:$src, imm:$cc))]>;
1720 def CMPPDrmi : PDIi8<0xC2, MRMSrcMem,
1721 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
1722 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1723 [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
1724 (memop addr:$src), imm:$cc))]>;
1725 }
1726 def : Pat<(v2i64 (vsetcc (v2f64 VR128:$src1), VR128:$src2, cond:$cc)),
1727 (CMPPDrri VR128:$src1, VR128:$src2, (SSE_CC_imm cond:$cc))>;
1728 def : Pat<(v2i64 (vsetcc (v2f64 VR128:$src1), (memop addr:$src2), cond:$cc)),
1729 (CMPPDrmi VR128:$src1, addr:$src2, (SSE_CC_imm cond:$cc))>;
1731 // Shuffle and unpack instructions
1732 let Constraints = "$src1 = $dst" in {
1733 def SHUFPDrri : PDIi8<0xC6, MRMSrcReg,
1734 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i8imm:$src3),
1735 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1736 [(set VR128:$dst, (v2f64 (vector_shuffle
1737 VR128:$src1, VR128:$src2,
1738 SHUFP_shuffle_mask:$src3)))]>;
1739 def SHUFPDrmi : PDIi8<0xC6, MRMSrcMem,
1740 (outs VR128:$dst), (ins VR128:$src1,
1741 f128mem:$src2, i8imm:$src3),
1742 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1744 (v2f64 (vector_shuffle
1745 VR128:$src1, (memopv2f64 addr:$src2),
1746 SHUFP_shuffle_mask:$src3)))]>;
1748 let AddedComplexity = 10 in {
1749 def UNPCKHPDrr : PDI<0x15, MRMSrcReg,
1750 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1751 "unpckhpd\t{$src2, $dst|$dst, $src2}",
1753 (v2f64 (vector_shuffle
1754 VR128:$src1, VR128:$src2,
1755 UNPCKH_shuffle_mask)))]>;
1756 def UNPCKHPDrm : PDI<0x15, MRMSrcMem,
1757 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
1758 "unpckhpd\t{$src2, $dst|$dst, $src2}",
1760 (v2f64 (vector_shuffle
1761 VR128:$src1, (memopv2f64 addr:$src2),
1762 UNPCKH_shuffle_mask)))]>;
1764 def UNPCKLPDrr : PDI<0x14, MRMSrcReg,
1765 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1766 "unpcklpd\t{$src2, $dst|$dst, $src2}",
1768 (v2f64 (vector_shuffle
1769 VR128:$src1, VR128:$src2,
1770 UNPCKL_shuffle_mask)))]>;
1771 def UNPCKLPDrm : PDI<0x14, MRMSrcMem,
1772 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
1773 "unpcklpd\t{$src2, $dst|$dst, $src2}",
1775 (v2f64 (vector_shuffle
1776 VR128:$src1, (memopv2f64 addr:$src2),
1777 UNPCKL_shuffle_mask)))]>;
1778 } // AddedComplexity
1779 } // Constraints = "$src1 = $dst"
1782 //===----------------------------------------------------------------------===//
1783 // SSE integer instructions
1784 //===----------------------------------------------------------------------===//
1785 // Move Instructions
1786 let neverHasSideEffects = 1 in
1787 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1788 "movdqa\t{$src, $dst|$dst, $src}", []>;
1789 let isSimpleLoad = 1, mayLoad = 1 in
1790 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1791 "movdqa\t{$src, $dst|$dst, $src}",
1792 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
1794 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
1795 "movdqa\t{$src, $dst|$dst, $src}",
1796 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
1797 let isSimpleLoad = 1, mayLoad = 1 in
1798 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1799 "movdqu\t{$src, $dst|$dst, $src}",
1800 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
1801 XS, Requires<[HasSSE2]>;
1803 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
1804 "movdqu\t{$src, $dst|$dst, $src}",
1805 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
1806 XS, Requires<[HasSSE2]>;
1808 // Intrinsic forms of MOVDQU load and store
1809 let isSimpleLoad = 1 in
1810 def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1811 "movdqu\t{$src, $dst|$dst, $src}",
1812 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
1813 XS, Requires<[HasSSE2]>;
1814 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
1815 "movdqu\t{$src, $dst|$dst, $src}",
1816 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
1817 XS, Requires<[HasSSE2]>;
1819 let Constraints = "$src1 = $dst" in {
1821 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
1822 bit Commutable = 0> {
1823 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1824 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1825 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]> {
1826 let isCommutable = Commutable;
1827 }
1828 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
1829 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1830 [(set VR128:$dst, (IntId VR128:$src1,
1831 (bitconvert (memopv2i64 addr:$src2))))]>;
1832 }
1834 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
1835 string OpcodeStr,
1836 Intrinsic IntId, Intrinsic IntId2> {
1837 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1838 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1839 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
1840 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
1841 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1842 [(set VR128:$dst, (IntId VR128:$src1,
1843 (bitconvert (memopv2i64 addr:$src2))))]>;
1844 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
1845 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1846 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
1847 }
1849 /// PDI_binop_rm - Simple SSE2 binary operator.
1850 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
1851 ValueType OpVT, bit Commutable = 0> {
1852 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1853 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1854 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]> {
1855 let isCommutable = Commutable;
1856 }
1857 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
1858 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1859 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
1860 (bitconvert (memopv2i64 addr:$src2)))))]>;
1861 }
1863 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
1865 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
1866 /// to collapse (bitconvert VT to VT) into its operand.
1868 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
1869 bit Commutable = 0> {
1870 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1871 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1872 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]> {
1873 let isCommutable = Commutable;
1874 }
1875 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
1876 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1877 [(set VR128:$dst, (OpNode VR128:$src1,(memopv2i64 addr:$src2)))]>;
1878 }
1880 } // Constraints = "$src1 = $dst"
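// Illustrative note on the FIXME above: instantiating PDI_binop_rm with
// OpVT = v2i64 would give a memory pattern of the form
//   (set VR128:$dst, (v2i64 (OpNode VR128:$src1,
//                            (bitconvert (memopv2i64 addr:$src2)))))
// where the v2i64-to-v2i64 bitconvert is a no-op that tblgen does not yet
// collapse, hence the separate PDI_binop_rm_v2i64 multiclass.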
1882 // 128-bit Integer Arithmetic
1884 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
1885 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
1886 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
1887 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
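// Each of these defms yields a reg-reg and a reg-mem form; e.g. PADDB above
// becomes PADDBrr and PADDBrm, and PCMPEQB (defined further down with
// PDI_binop_rm_int) becomes PCMPEQBrr and PCMPEQBrm, the names the vsetcc
// patterns later in this section refer to directly.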
1889 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
1890 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
1891 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
1892 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
1894 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
1895 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
1896 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
1897 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
1899 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
1900 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
1901 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
1902 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
1904 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
1906 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
1907 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w , 1>;
1908 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
1910 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
1912 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
1913 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
1916 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
1917 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
1918 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
1919 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
1920 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
1923 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
1924 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
1925 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
1926 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
1927 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
1928 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
1930 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
1931 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
1932 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
1933 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
1934 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
1935 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
1937 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
1938 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
1939 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
1940 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
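// Each shift defm above provides three forms: e.g. PSLLW expands to PSLLWrr
// (count in an XMM register), PSLLWrm (count loaded from memory) and PSLLWri
// (immediate count), matching the IntId / IntId2 intrinsic pair passed in.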
1942 // 128-bit logical shifts.
1943 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1944 def PSLLDQri : PDIi8<0x73, MRM7r,
1945 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
1946 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
1947 def PSRLDQri : PDIi8<0x73, MRM3r,
1948 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
1949 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
1950 // PSRADQri doesn't exist in SSE[1-3].
1951 }
1953 let Predicates = [HasSSE2] in {
1954 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
1955 (v2i64 (PSLLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
1956 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
1957 (v2i64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
1958 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
1959 (v2f64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
1960 }
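// Note on the patterns above (an assumption, since PSxLDQ_imm is defined
// earlier in this file): the psll_dq / psrl_dq intrinsics express the shift
// amount in bits, while pslldq/psrldq take a byte count, so PSxLDQ_imm is
// expected to scale the immediate (e.g. a 64-bit shift becomes a byte count
// of 8 in the emitted pslldq/psrldq).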
1963 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
1964 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or , 1>;
1965 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
1967 let Constraints = "$src1 = $dst" in {
1968 def PANDNrr : PDI<0xDF, MRMSrcReg,
1969 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1970 "pandn\t{$src2, $dst|$dst, $src2}",
1971 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
1972 VR128:$src2)))]>;
1974 def PANDNrm : PDI<0xDF, MRMSrcMem,
1975 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
1976 "pandn\t{$src2, $dst|$dst, $src2}",
1977 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
1978 (memopv2i64 addr:$src2))))]>;
1979 }
1981 // SSE2 Integer comparison
1982 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b>;
1983 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w>;
1984 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d>;
1985 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
1986 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
1987 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
1989 def : Pat<(v16i8 (vsetcc (v16i8 VR128:$src1), VR128:$src2, SETEQ)),
1990 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
1991 def : Pat<(v16i8 (vsetcc (v16i8 VR128:$src1), (memop addr:$src2), SETEQ)),
1992 (PCMPEQBrm VR128:$src1, addr:$src2)>;
1993 def : Pat<(v8i16 (vsetcc (v8i16 VR128:$src1), VR128:$src2, SETEQ)),
1994 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
1995 def : Pat<(v8i16 (vsetcc (v8i16 VR128:$src1), (memop addr:$src2), SETEQ)),
1996 (PCMPEQWrm VR128:$src1, addr:$src2)>;
1997 def : Pat<(v4i32 (vsetcc (v4i32 VR128:$src1), VR128:$src2, SETEQ)),
1998 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
1999 def : Pat<(v4i32 (vsetcc (v4i32 VR128:$src1), (memop addr:$src2), SETEQ)),
2000 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2002 def : Pat<(v16i8 (vsetcc (v16i8 VR128:$src1), VR128:$src2, SETGT)),
2003 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2004 def : Pat<(v16i8 (vsetcc (v16i8 VR128:$src1), (memop addr:$src2), SETGT)),
2005 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2006 def : Pat<(v8i16 (vsetcc (v8i16 VR128:$src1), VR128:$src2, SETGT)),
2007 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2008 def : Pat<(v8i16 (vsetcc (v8i16 VR128:$src1), (memop addr:$src2), SETGT)),
2009 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2010 def : Pat<(v4i32 (vsetcc (v4i32 VR128:$src1), VR128:$src2, SETGT)),
2011 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2012 def : Pat<(v4i32 (vsetcc (v4i32 VR128:$src1), (memop addr:$src2), SETGT)),
2013 (PCMPGTDrm VR128:$src1, addr:$src2)>;
2016 // Pack instructions
2017 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2018 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2019 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2021 // Shuffle and unpack instructions
2022 def PSHUFDri : PDIi8<0x70, MRMSrcReg,
2023 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2024 "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2025 [(set VR128:$dst, (v4i32 (vector_shuffle
2026 VR128:$src1, (undef),
2027 PSHUFD_shuffle_mask:$src2)))]>;
2028 def PSHUFDmi : PDIi8<0x70, MRMSrcMem,
2029 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2030 "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2031 [(set VR128:$dst, (v4i32 (vector_shuffle
2032 (bc_v4i32 (memopv2i64 addr:$src1)),
2033 (undef),
2034 PSHUFD_shuffle_mask:$src2)))]>;
2036 // SSE2 with ImmT == Imm8 and XS prefix.
2037 def PSHUFHWri : Ii8<0x70, MRMSrcReg,
2038 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2039 "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2040 [(set VR128:$dst, (v8i16 (vector_shuffle
2041 VR128:$src1, (undef),
2042 PSHUFHW_shuffle_mask:$src2)))]>,
2043 XS, Requires<[HasSSE2]>;
2044 def PSHUFHWmi : Ii8<0x70, MRMSrcMem,
2045 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2046 "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2047 [(set VR128:$dst, (v8i16 (vector_shuffle
2048 (bc_v8i16 (memopv2i64 addr:$src1)),
2049 (undef),
2050 PSHUFHW_shuffle_mask:$src2)))]>,
2051 XS, Requires<[HasSSE2]>;
2053 // SSE2 with ImmT == Imm8 and XD prefix.
2054 def PSHUFLWri : Ii8<0x70, MRMSrcReg,
2055 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2056 "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2057 [(set VR128:$dst, (v8i16 (vector_shuffle
2058 VR128:$src1, (undef),
2059 PSHUFLW_shuffle_mask:$src2)))]>,
2060 XD, Requires<[HasSSE2]>;
2061 def PSHUFLWmi : Ii8<0x70, MRMSrcMem,
2062 (outs VR128:$dst), (ins i128mem:$src1, i32i8imm:$src2),
2063 "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2064 [(set VR128:$dst, (v8i16 (vector_shuffle
2065 (bc_v8i16 (memopv2i64 addr:$src1)),
2066 (undef),
2067 PSHUFLW_shuffle_mask:$src2)))]>,
2068 XD, Requires<[HasSSE2]>;
2071 let Constraints = "$src1 = $dst" in {
2072 def PUNPCKLBWrr : PDI<0x60, MRMSrcReg,
2073 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2074 "punpcklbw\t{$src2, $dst|$dst, $src2}",
2076 (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
2077 UNPCKL_shuffle_mask)))]>;
2078 def PUNPCKLBWrm : PDI<0x60, MRMSrcMem,
2079 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2080 "punpcklbw\t{$src2, $dst|$dst, $src2}",
2082 (v16i8 (vector_shuffle VR128:$src1,
2083 (bc_v16i8 (memopv2i64 addr:$src2)),
2084 UNPCKL_shuffle_mask)))]>;
2085 def PUNPCKLWDrr : PDI<0x61, MRMSrcReg,
2086 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2087 "punpcklwd\t{$src2, $dst|$dst, $src2}",
2089 (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
2090 UNPCKL_shuffle_mask)))]>;
2091 def PUNPCKLWDrm : PDI<0x61, MRMSrcMem,
2092 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2093 "punpcklwd\t{$src2, $dst|$dst, $src2}",
2095 (v8i16 (vector_shuffle VR128:$src1,
2096 (bc_v8i16 (memopv2i64 addr:$src2)),
2097 UNPCKL_shuffle_mask)))]>;
2098 def PUNPCKLDQrr : PDI<0x62, MRMSrcReg,
2099 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2100 "punpckldq\t{$src2, $dst|$dst, $src2}",
2102 (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2103 UNPCKL_shuffle_mask)))]>;
2104 def PUNPCKLDQrm : PDI<0x62, MRMSrcMem,
2105 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2106 "punpckldq\t{$src2, $dst|$dst, $src2}",
2108 (v4i32 (vector_shuffle VR128:$src1,
2109 (bc_v4i32 (memopv2i64 addr:$src2)),
2110 UNPCKL_shuffle_mask)))]>;
2111 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2112 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2113 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2115 (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
2116 UNPCKL_shuffle_mask)))]>;
2117 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2118 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2119 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2121 (v2i64 (vector_shuffle VR128:$src1,
2122 (memopv2i64 addr:$src2),
2123 UNPCKL_shuffle_mask)))]>;
2125 def PUNPCKHBWrr : PDI<0x68, MRMSrcReg,
2126 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2127 "punpckhbw\t{$src2, $dst|$dst, $src2}",
2129 (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
2130 UNPCKH_shuffle_mask)))]>;
2131 def PUNPCKHBWrm : PDI<0x68, MRMSrcMem,
2132 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2133 "punpckhbw\t{$src2, $dst|$dst, $src2}",
2135 (v16i8 (vector_shuffle VR128:$src1,
2136 (bc_v16i8 (memopv2i64 addr:$src2)),
2137 UNPCKH_shuffle_mask)))]>;
2138 def PUNPCKHWDrr : PDI<0x69, MRMSrcReg,
2139 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2140 "punpckhwd\t{$src2, $dst|$dst, $src2}",
2142 (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
2143 UNPCKH_shuffle_mask)))]>;
2144 def PUNPCKHWDrm : PDI<0x69, MRMSrcMem,
2145 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2146 "punpckhwd\t{$src2, $dst|$dst, $src2}",
2148 (v8i16 (vector_shuffle VR128:$src1,
2149 (bc_v8i16 (memopv2i64 addr:$src2)),
2150 UNPCKH_shuffle_mask)))]>;
2151 def PUNPCKHDQrr : PDI<0x6A, MRMSrcReg,
2152 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2153 "punpckhdq\t{$src2, $dst|$dst, $src2}",
2155 (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2156 UNPCKH_shuffle_mask)))]>;
2157 def PUNPCKHDQrm : PDI<0x6A, MRMSrcMem,
2158 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2159 "punpckhdq\t{$src2, $dst|$dst, $src2}",
2161 (v4i32 (vector_shuffle VR128:$src1,
2162 (bc_v4i32 (memopv2i64 addr:$src2)),
2163 UNPCKH_shuffle_mask)))]>;
2164 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2165 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2166 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2168 (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
2169 UNPCKH_shuffle_mask)))]>;
2170 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2171 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2172 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2174 (v2i64 (vector_shuffle VR128:$src1,
2175 (memopv2i64 addr:$src2),
2176 UNPCKH_shuffle_mask)))]>;
2177 }
2179 // Extract / Insert
2180 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2181 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2182 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2183 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2184 imm:$src2))]>;
2185 let Constraints = "$src1 = $dst" in {
2186 def PINSRWrri : PDIi8<0xC4, MRMSrcReg,
2187 (outs VR128:$dst), (ins VR128:$src1,
2188 GR32:$src2, i32i8imm:$src3),
2189 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2191 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
2192 def PINSRWrmi : PDIi8<0xC4, MRMSrcMem,
2193 (outs VR128:$dst), (ins VR128:$src1,
2194 i16mem:$src2, i32i8imm:$src3),
2195 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2197 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2202 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2203 "pmovmskb\t{$src, $dst|$dst, $src}",
2204 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2206 // Conditional store
2207 let Uses = [EDI] in
2208 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2209 "maskmovdqu\t{$mask, $src|$src, $mask}",
2210 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
2212 // Non-temporal stores
2213 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2214 "movntpd\t{$src, $dst|$dst, $src}",
2215 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
2216 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2217 "movntdq\t{$src, $dst|$dst, $src}",
2218 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
2219 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2220 "movnti\t{$src, $dst|$dst, $src}",
2221 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
2222 TB, Requires<[HasSSE2]>;
2225 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
2226 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
2227 TB, Requires<[HasSSE2]>;
2229 // Load, store, and memory fence
2230 def LFENCE : I<0xAE, MRM5m, (outs), (ins),
2231 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
2232 def MFENCE : I<0xAE, MRM6m, (outs), (ins),
2233 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
2235 //TODO: custom lower this so as to never even generate the noop
2236 def : Pat<(membarrier (i8 imm:$ll), (i8 imm:$ls), (i8 imm:$sl), (i8 imm:$ss),
2237 (i8 0)), (NOOP)>;
2238 def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
2239 def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
2240 def : Pat<(membarrier (i8 imm:$ll), (i8 imm:$ls), (i8 imm:$sl), (i8 imm:$ss),
2241 (i8 1)), (MFENCE)>;
2243 // Alias instruction that maps the all-ones vector to pcmpeqd $dst, $dst for SSE2.
2244 let isReMaterializable = 1 in
2245 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins),
2246 "pcmpeqd\t$dst, $dst",
2247 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
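// pcmpeqd of a register with itself compares equal in every element, so this
// single instruction materializes the all-ones vector without touching memory.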
2249 // FR64 to 128-bit vector conversion.
2250 def MOVSD2PDrr : SDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR64:$src),
2251 "movsd\t{$src, $dst|$dst, $src}",
2253 (v2f64 (scalar_to_vector FR64:$src)))]>;
2254 def MOVSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2255 "movsd\t{$src, $dst|$dst, $src}",
2257 (v2f64 (scalar_to_vector (loadf64 addr:$src))))]>;
2259 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2260 "movd\t{$src, $dst|$dst, $src}",
2262 (v4i32 (scalar_to_vector GR32:$src)))]>;
2263 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2264 "movd\t{$src, $dst|$dst, $src}",
2266 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2268 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2269 "movd\t{$src, $dst|$dst, $src}",
2270 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2272 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2273 "movd\t{$src, $dst|$dst, $src}",
2274 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2276 // SSE2 instructions with XS prefix
2277 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2278 "movq\t{$src, $dst|$dst, $src}",
2280 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
2281 Requires<[HasSSE2]>;
2282 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
2283 "movq\t{$src, $dst|$dst, $src}",
2284 [(store (i64 (vector_extract (v2i64 VR128:$src),
2285 (iPTR 0))), addr:$dst)]>;
2287 // FIXME: may not be able to eliminate this movss with coalescing since the src
2288 // and dest register classes are different. We really want to write this pattern
2289 // like this:
2290 // def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2291 // (f32 FR32:$src)>;
2292 def MOVPD2SDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins VR128:$src),
2293 "movsd\t{$src, $dst|$dst, $src}",
2294 [(set FR64:$dst, (vector_extract (v2f64 VR128:$src),
2295 (iPTR 0)))]>;
2296 def MOVPD2SDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
2297 "movsd\t{$src, $dst|$dst, $src}",
2298 [(store (f64 (vector_extract (v2f64 VR128:$src),
2299 (iPTR 0))), addr:$dst)]>;
2300 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2301 "movd\t{$src, $dst|$dst, $src}",
2302 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2303 (iPTR 0)))]>;
2304 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
2305 "movd\t{$src, $dst|$dst, $src}",
2306 [(store (i32 (vector_extract (v4i32 VR128:$src),
2307 (iPTR 0))), addr:$dst)]>;
2309 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
2310 "movd\t{$src, $dst|$dst, $src}",
2311 [(set GR32:$dst, (bitconvert FR32:$src))]>;
2312 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
2313 "movd\t{$src, $dst|$dst, $src}",
2314 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
2317 // Move to the lower bits of a VR128, leaving the upper bits alone.
2318 // Three-operand (but two-address) aliases.
2319 let Constraints = "$src1 = $dst" in {
2320 let neverHasSideEffects = 1 in
2321 def MOVLSD2PDrr : SDI<0x10, MRMSrcReg,
2322 (outs VR128:$dst), (ins VR128:$src1, FR64:$src2),
2323 "movsd\t{$src2, $dst|$dst, $src2}", []>;
2325 let AddedComplexity = 15 in
2326 def MOVLPDrr : SDI<0x10, MRMSrcReg,
2327 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2328 "movsd\t{$src2, $dst|$dst, $src2}",
2330 (v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
2331 MOVL_shuffle_mask)))]>;
2332 }
2334 // Store / copy the lower 64 bits of an XMM register.
2335 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
2336 "movq\t{$src, $dst|$dst, $src}",
2337 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
2339 // Move to the lower bits of a VR128, zeroing the upper bits.
2340 // Loading from memory automatically zeroes the upper bits.
2341 let AddedComplexity = 20 in {
2342 def MOVZSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2343 "movsd\t{$src, $dst|$dst, $src}",
2345 (v2f64 (X86vzmovl (v2f64 (scalar_to_vector
2346 (loadf64 addr:$src))))))]>;
2348 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
2349 (MOVZSD2PDrm addr:$src)>;
2350 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
2351 (MOVZSD2PDrm addr:$src)>;
2352 def : Pat<(v2f64 (X86vzload addr:$src)), (MOVZSD2PDrm addr:$src)>;
2353 }
2355 // movd / movq to XMM register zero-extends
2356 let AddedComplexity = 15 in {
2357 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2358 "movd\t{$src, $dst|$dst, $src}",
2359 [(set VR128:$dst, (v4i32 (X86vzmovl
2360 (v4i32 (scalar_to_vector GR32:$src)))))]>;
2361 // This is X86-64 only.
2362 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2363 "mov{d|q}\t{$src, $dst|$dst, $src}",
2364 [(set VR128:$dst, (v2i64 (X86vzmovl
2365 (v2i64 (scalar_to_vector GR64:$src)))))]>;
2366 }
2368 let AddedComplexity = 20 in {
2369 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2370 "movd\t{$src, $dst|$dst, $src}",
2372 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
2373 (loadi32 addr:$src))))))]>;
2375 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
2376 (MOVZDI2PDIrm addr:$src)>;
2377 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
2378 (MOVZDI2PDIrm addr:$src)>;
2380 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2381 "movq\t{$src, $dst|$dst, $src}",
2383 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
2384 (loadi64 addr:$src))))))]>, XS,
2385 Requires<[HasSSE2]>;
2387 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
2388 (MOVZQI2PQIrm addr:$src)>;
2389 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
2390 (MOVZQI2PQIrm addr:$src)>;
2391 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
2392 }
2394 // Move from XMM to XMM, clearing the upper 64 bits. Note: there is a bug in the
2395 // IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
2396 let AddedComplexity = 15 in
2397 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2398 "movq\t{$src, $dst|$dst, $src}",
2399 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
2400 XS, Requires<[HasSSE2]>;
2402 let AddedComplexity = 20 in {
2403 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2404 "movq\t{$src, $dst|$dst, $src}",
2405 [(set VR128:$dst, (v2i64 (X86vzmovl
2406 (loadv2i64 addr:$src))))]>,
2407 XS, Requires<[HasSSE2]>;
2409 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
2410 (MOVZPQILo2PQIrm addr:$src)>;
2411 }
2413 //===----------------------------------------------------------------------===//
2414 // SSE3 Instructions
2415 //===----------------------------------------------------------------------===//
2417 // Move Instructions
2418 def MOVSHDUPrr : S3SI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2419 "movshdup\t{$src, $dst|$dst, $src}",
2420 [(set VR128:$dst, (v4f32 (vector_shuffle
2421 VR128:$src, (undef),
2422 MOVSHDUP_shuffle_mask)))]>;
2423 def MOVSHDUPrm : S3SI<0x16, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2424 "movshdup\t{$src, $dst|$dst, $src}",
2425 [(set VR128:$dst, (v4f32 (vector_shuffle
2426 (memopv4f32 addr:$src), (undef),
2427 MOVSHDUP_shuffle_mask)))]>;
2429 def MOVSLDUPrr : S3SI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2430 "movsldup\t{$src, $dst|$dst, $src}",
2431 [(set VR128:$dst, (v4f32 (vector_shuffle
2432 VR128:$src, (undef),
2433 MOVSLDUP_shuffle_mask)))]>;
2434 def MOVSLDUPrm : S3SI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2435 "movsldup\t{$src, $dst|$dst, $src}",
2436 [(set VR128:$dst, (v4f32 (vector_shuffle
2437 (memopv4f32 addr:$src), (undef),
2438 MOVSLDUP_shuffle_mask)))]>;
2440 def MOVDDUPrr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2441 "movddup\t{$src, $dst|$dst, $src}",
2442 [(set VR128:$dst, (v2f64 (vector_shuffle
2443 VR128:$src, (undef),
2444 SSE_splat_lo_mask)))]>;
2445 def MOVDDUPrm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2446 "movddup\t{$src, $dst|$dst, $src}",
2448 (v2f64 (vector_shuffle
2449 (scalar_to_vector (loadf64 addr:$src)),
2450 (undef),
2451 SSE_splat_lo_mask)))]>;
2454 let Constraints = "$src1 = $dst" in {
2455 def ADDSUBPSrr : S3DI<0xD0, MRMSrcReg,
2456 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2457 "addsubps\t{$src2, $dst|$dst, $src2}",
2458 [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
2459 VR128:$src2))]>;
2460 def ADDSUBPSrm : S3DI<0xD0, MRMSrcMem,
2461 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
2462 "addsubps\t{$src2, $dst|$dst, $src2}",
2463 [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
2464 (memop addr:$src2)))]>;
2465 def ADDSUBPDrr : S3I<0xD0, MRMSrcReg,
2466 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2467 "addsubpd\t{$src2, $dst|$dst, $src2}",
2468 [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
2469 VR128:$src2))]>;
2470 def ADDSUBPDrm : S3I<0xD0, MRMSrcMem,
2471 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
2472 "addsubpd\t{$src2, $dst|$dst, $src2}",
2473 [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
2474 (memop addr:$src2)))]>;
2475 }
2477 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2478 "lddqu\t{$src, $dst|$dst, $src}",
2479 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
2482 class S3D_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
2483 : S3DI<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2484 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2485 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
2486 class S3D_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
2487 : S3DI<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
2488 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2489 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, (memop addr:$src2))))]>;
2490 class S3_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
2491 : S3I<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2492 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2493 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
2494 class S3_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
2495 : S3I<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
2496 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2497 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, (memopv2f64 addr:$src2))))]>;
2499 let Constraints = "$src1 = $dst" in {
2500 def HADDPSrr : S3D_Intrr<0x7C, "haddps", int_x86_sse3_hadd_ps>;
2501 def HADDPSrm : S3D_Intrm<0x7C, "haddps", int_x86_sse3_hadd_ps>;
2502 def HADDPDrr : S3_Intrr <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
2503 def HADDPDrm : S3_Intrm <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
2504 def HSUBPSrr : S3D_Intrr<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
2505 def HSUBPSrm : S3D_Intrm<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
2506 def HSUBPDrr : S3_Intrr <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
2507 def HSUBPDrm : S3_Intrm <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
2508 }
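// Example of the horizontal-op semantics: with $src1 = <a0,a1,a2,a3> and
// $src2 = <b0,b1,b2,b3>, haddps produces <a0+a1, a2+a3, b0+b1, b2+b3>;
// hsubps subtracts within each pair instead, and the *pd forms do the same
// on two doubles per operand.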
2510 // Thread synchronization
2511 def MONITOR : I<0xC8, RawFrm, (outs), (ins), "monitor",
2512 [(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
2513 def MWAIT : I<0xC9, RawFrm, (outs), (ins), "mwait",
2514 [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
2516 // vector_shuffle v1, <undef> <1, 1, 3, 3>
2517 let AddedComplexity = 15 in
2518 def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2519 MOVSHDUP_shuffle_mask)),
2520 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
2521 let AddedComplexity = 20 in
2522 def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
2523 MOVSHDUP_shuffle_mask)),
2524 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
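// i.e. for x = <x0, x1, x2, x3>, movshdup gives <x1, x1, x3, x3>.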
2526 // vector_shuffle v1, <undef> <0, 0, 2, 2>
2527 let AddedComplexity = 15 in
2528 def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2529 MOVSLDUP_shuffle_mask)),
2530 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
2531 let AddedComplexity = 20 in
2532 def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
2533 MOVSLDUP_shuffle_mask)),
2534 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
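// i.e. for x = <x0, x1, x2, x3>, movsldup gives <x0, x0, x2, x2>.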
2536 //===----------------------------------------------------------------------===//
2537 // SSSE3 Instructions
2538 //===----------------------------------------------------------------------===//
2540 /// SS3I_unop_rm_int_8 - Simple SSSE3 unary operator whose type is v*i8.
2541 multiclass SS3I_unop_rm_int_8<bits<8> opc, string OpcodeStr,
2542 Intrinsic IntId64, Intrinsic IntId128> {
2543 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
2544 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2545 [(set VR64:$dst, (IntId64 VR64:$src))]>;
2547 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
2548 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2549 [(set VR64:$dst,
2550 (IntId64 (bitconvert (memopv8i8 addr:$src))))]>;
2552 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2553 (ins VR128:$src),
2554 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2555 [(set VR128:$dst, (IntId128 VR128:$src))]>,
2556 OpSize;
2558 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2559 (ins i128mem:$src),
2560 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2561 [(set VR128:$dst,
2562 (IntId128
2563 (bitconvert (memopv16i8 addr:$src))))]>, OpSize;
2564 }
2566 /// SS3I_unop_rm_int_16 - Simple SSSE3 unary operator whose type is v*i16.
2567 multiclass SS3I_unop_rm_int_16<bits<8> opc, string OpcodeStr,
2568 Intrinsic IntId64, Intrinsic IntId128> {
2569 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2570 (ins VR64:$src),
2571 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2572 [(set VR64:$dst, (IntId64 VR64:$src))]>;
2574 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2575 (ins i64mem:$src),
2576 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2577 [(set VR64:$dst,
2578 (IntId64
2579 (bitconvert (memopv4i16 addr:$src))))]>;
2581 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2582 (ins VR128:$src),
2583 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2584 [(set VR128:$dst, (IntId128 VR128:$src))]>,
2585 OpSize;
2587 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2588 (ins i128mem:$src),
2589 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2590 [(set VR128:$dst,
2591 (IntId128
2592 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
2593 }
2595 /// SS3I_unop_rm_int_32 - Simple SSSE3 unary operator whose type is v*i32.
2596 multiclass SS3I_unop_rm_int_32<bits<8> opc, string OpcodeStr,
2597 Intrinsic IntId64, Intrinsic IntId128> {
2598 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2599 (ins VR64:$src),
2600 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2601 [(set VR64:$dst, (IntId64 VR64:$src))]>;
2603 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2604 (ins i64mem:$src),
2605 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2606 [(set VR64:$dst,
2607 (IntId64
2608 (bitconvert (memopv2i32 addr:$src))))]>;
2610 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2611 (ins VR128:$src),
2612 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2613 [(set VR128:$dst, (IntId128 VR128:$src))]>,
2614 OpSize;
2616 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2617 (ins i128mem:$src),
2618 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2619 [(set VR128:$dst,
2620 (IntId128
2621 (bitconvert (memopv4i32 addr:$src))))]>, OpSize;
2622 }
2624 defm PABSB : SS3I_unop_rm_int_8 <0x1C, "pabsb",
2625 int_x86_ssse3_pabs_b,
2626 int_x86_ssse3_pabs_b_128>;
2627 defm PABSW : SS3I_unop_rm_int_16<0x1D, "pabsw",
2628 int_x86_ssse3_pabs_w,
2629 int_x86_ssse3_pabs_w_128>;
2630 defm PABSD : SS3I_unop_rm_int_32<0x1E, "pabsd",
2631 int_x86_ssse3_pabs_d,
2632 int_x86_ssse3_pabs_d_128>;
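// Each PABS* defm above creates four instructions: e.g. PABSB expands to
// PABSBrr64 and PABSBrm64 (the MMX-register forms) plus PABSBrr128 and
// PABSBrm128 (the XMM forms).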
2634 /// SS3I_binop_rm_int_8 - Simple SSSE3 binary operator whose type is v*i8.
2635 let Constraints = "$src1 = $dst" in {
2636 multiclass SS3I_binop_rm_int_8<bits<8> opc, string OpcodeStr,
2637 Intrinsic IntId64, Intrinsic IntId128,
2638 bit Commutable = 0> {
2639 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2640 (ins VR64:$src1, VR64:$src2),
2641 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2642 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
2643 let isCommutable = Commutable;
2644 }
2645 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2646 (ins VR64:$src1, i64mem:$src2),
2647 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2648 [(set VR64:$dst,
2649 (IntId64 VR64:$src1,
2650 (bitconvert (memopv8i8 addr:$src2))))]>;
2652 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2653 (ins VR128:$src1, VR128:$src2),
2654 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2655 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
2656 OpSize {
2657 let isCommutable = Commutable;
2658 }
2659 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2660 (ins VR128:$src1, i128mem:$src2),
2661 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2662 [(set VR128:$dst,
2663 (IntId128 VR128:$src1,
2664 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
2665 }
2666 }
2668 /// SS3I_binop_rm_int_16 - Simple SSSE3 binary operator whose type is v*i16.
2669 let Constraints = "$src1 = $dst" in {
2670 multiclass SS3I_binop_rm_int_16<bits<8> opc, string OpcodeStr,
2671 Intrinsic IntId64, Intrinsic IntId128,
2672 bit Commutable = 0> {
2673 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2674 (ins VR64:$src1, VR64:$src2),
2675 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2676 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
2677 let isCommutable = Commutable;
2678 }
2679 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2680 (ins VR64:$src1, i64mem:$src2),
2681 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2682 [(set VR64:$dst,
2683 (IntId64 VR64:$src1,
2684 (bitconvert (memopv4i16 addr:$src2))))]>;
2686 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2687 (ins VR128:$src1, VR128:$src2),
2688 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2689 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
2690 OpSize {
2691 let isCommutable = Commutable;
2692 }
2693 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2694 (ins VR128:$src1, i128mem:$src2),
2695 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2696 [(set VR128:$dst,
2697 (IntId128 VR128:$src1,
2698 (bitconvert (memopv8i16 addr:$src2))))]>, OpSize;
2699 }
2700 }
2702 /// SS3I_binop_rm_int_32 - Simple SSSE3 binary operator whose type is v*i32.
2703 let Constraints = "$src1 = $dst" in {
2704 multiclass SS3I_binop_rm_int_32<bits<8> opc, string OpcodeStr,
2705 Intrinsic IntId64, Intrinsic IntId128,
2706 bit Commutable = 0> {
2707 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2708 (ins VR64:$src1, VR64:$src2),
2709 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2710 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
2711 let isCommutable = Commutable;
2712 }
2713 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2714 (ins VR64:$src1, i64mem:$src2),
2715 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2716 [(set VR64:$dst,
2717 (IntId64 VR64:$src1,
2718 (bitconvert (memopv2i32 addr:$src2))))]>;
2720 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2721 (ins VR128:$src1, VR128:$src2),
2722 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2723 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
2724 OpSize {
2725 let isCommutable = Commutable;
2726 }
2727 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2728 (ins VR128:$src1, i128mem:$src2),
2729 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2730 [(set VR128:$dst,
2731 (IntId128 VR128:$src1,
2732 (bitconvert (memopv4i32 addr:$src2))))]>, OpSize;
2733 }
2734 }
2736 defm PHADDW : SS3I_binop_rm_int_16<0x01, "phaddw",
2737 int_x86_ssse3_phadd_w,
2738 int_x86_ssse3_phadd_w_128, 1>;
2739 defm PHADDD : SS3I_binop_rm_int_32<0x02, "phaddd",
2740 int_x86_ssse3_phadd_d,
2741 int_x86_ssse3_phadd_d_128, 1>;
2742 defm PHADDSW : SS3I_binop_rm_int_16<0x03, "phaddsw",
2743 int_x86_ssse3_phadd_sw,
2744 int_x86_ssse3_phadd_sw_128, 1>;
2745 defm PHSUBW : SS3I_binop_rm_int_16<0x05, "phsubw",
2746 int_x86_ssse3_phsub_w,
2747 int_x86_ssse3_phsub_w_128>;
2748 defm PHSUBD : SS3I_binop_rm_int_32<0x06, "phsubd",
2749 int_x86_ssse3_phsub_d,
2750 int_x86_ssse3_phsub_d_128>;
2751 defm PHSUBSW : SS3I_binop_rm_int_16<0x07, "phsubsw",
2752 int_x86_ssse3_phsub_sw,
2753 int_x86_ssse3_phsub_sw_128>;
2754 defm PMADDUBSW : SS3I_binop_rm_int_8 <0x04, "pmaddubsw",
2755 int_x86_ssse3_pmadd_ub_sw,
2756 int_x86_ssse3_pmadd_ub_sw_128, 1>;
2757 defm PMULHRSW : SS3I_binop_rm_int_16<0x0B, "pmulhrsw",
2758 int_x86_ssse3_pmul_hr_sw,
2759 int_x86_ssse3_pmul_hr_sw_128, 1>;
2760 defm PSHUFB : SS3I_binop_rm_int_8 <0x00, "pshufb",
2761 int_x86_ssse3_pshuf_b,
2762 int_x86_ssse3_pshuf_b_128>;
2763 defm PSIGNB : SS3I_binop_rm_int_8 <0x08, "psignb",
2764 int_x86_ssse3_psign_b,
2765 int_x86_ssse3_psign_b_128>;
2766 defm PSIGNW : SS3I_binop_rm_int_16<0x09, "psignw",
2767 int_x86_ssse3_psign_w,
2768 int_x86_ssse3_psign_w_128>;
2769 defm PSIGND : SS3I_binop_rm_int_32<0x0A, "psignd",
2770 int_x86_ssse3_psign_d,
2771 int_x86_ssse3_psign_d_128>;
2773 let Constraints = "$src1 = $dst" in {
2774 def PALIGNR64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
2775 (ins VR64:$src1, VR64:$src2, i16imm:$src3),
2776 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2778 (int_x86_ssse3_palign_r
2779 VR64:$src1, VR64:$src2,
2780 imm:$src3))]>;
2781 def PALIGNR64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
2782 (ins VR64:$src1, i64mem:$src2, i16imm:$src3),
2783 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2785 (int_x86_ssse3_palign_r
2786 VR64:$src1,
2787 (bitconvert (memopv2i32 addr:$src2)),
2788 imm:$src3))]>;
2790 def PALIGNR128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
2791 (ins VR128:$src1, VR128:$src2, i32imm:$src3),
2792 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2794 (int_x86_ssse3_palign_r_128
2795 VR128:$src1, VR128:$src2,
2796 imm:$src3))]>, OpSize;
2797 def PALIGNR128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
2798 (ins VR128:$src1, i128mem:$src2, i32imm:$src3),
2799 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2801 (int_x86_ssse3_palign_r_128
2802 VR128:$src1,
2803 (bitconvert (memopv4i32 addr:$src2)),
2804 imm:$src3))]>, OpSize;
2805 }
2807 //===----------------------------------------------------------------------===//
2808 // Non-Instruction Patterns
2809 //===----------------------------------------------------------------------===//
2811 // extload f32 -> f64. This matches load+fextend because we have a hack in
2812 // the isel (PreprocessForFPConvert) that can introduce loads after dag combine.
2813 // Since these loads aren't folded into the fextend, we have to match it
2814 // explicitly here.
2815 let Predicates = [HasSSE2] in
2816 def : Pat<(fextend (loadf32 addr:$src)),
2817 (CVTSS2SDrm addr:$src)>;
2820 let Predicates = [HasSSE2] in {
2821 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
2822 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
2823 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
2824 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
2825 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
2826 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
2827 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
2828 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
2829 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
2830 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
2831 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
2832 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
2833 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
2834 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
2835 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
2836 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
2837 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
2838 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
2839 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
2840 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
2841 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
2842 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
2843 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
2844 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
2845 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
2846 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
2847 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
2848 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
2849 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
2850 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
2853 // Move scalar to XMM zero-extended
2854 // movd to XMM register zero-extends
2855 let AddedComplexity = 15 in {
2856 // Zero a VR128, then do a MOVS{S|D} to the lower bits.
2857 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
2858 (MOVLSD2PDrr (V_SET0), FR64:$src)>, Requires<[HasSSE2]>;
2859 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
2860 (MOVLSS2PSrr (V_SET0), FR32:$src)>, Requires<[HasSSE2]>;
2861 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
2862 (MOVLPSrr (V_SET0), VR128:$src)>, Requires<[HasSSE2]>;
2863 }
2865 // Splat v2f64 / v2i64
2866 let AddedComplexity = 10 in {
2867 def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
2868 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2869 def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
2870 (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2871 def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
2872 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2873 def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
2874 (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2875 }
2877 // Special unary SHUFPSrri case.
2878 def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
2879 SHUFP_unary_shuffle_mask:$sm)),
2880 (SHUFPSrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
2881 Requires<[HasSSE1]>;
2882 // Special unary SHUFPDrri case.
2883 def : Pat<(v2f64 (vector_shuffle VR128:$src1, (undef),
2884 SHUFP_unary_shuffle_mask:$sm)),
2885 (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
2886 Requires<[HasSSE2]>;
2887 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
2888 def : Pat<(vector_shuffle (bc_v4i32 (memopv4f32 addr:$src1)), (undef),
2889 SHUFP_unary_shuffle_mask:$sm),
2890 (PSHUFDmi addr:$src1, SHUFP_unary_shuffle_mask:$sm)>,
2891 Requires<[HasSSE2]>;
2892 // Special binary v4i32 shuffle cases with SHUFPS.
2893 def : Pat<(v4i32 (vector_shuffle VR128:$src1, (v4i32 VR128:$src2),
2894 PSHUFD_binary_shuffle_mask:$sm)),
2895 (SHUFPSrri VR128:$src1, VR128:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
2896 Requires<[HasSSE2]>;
2897 def : Pat<(v4i32 (vector_shuffle VR128:$src1,
2898 (bc_v4i32 (memopv2i64 addr:$src2)), PSHUFD_binary_shuffle_mask:$sm)),
2899 (SHUFPSrmi VR128:$src1, addr:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
2900 Requires<[HasSSE2]>;
2901 // Special binary v2i64 shuffle cases using SHUFPDrri.
2902 def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
2903 SHUFP_shuffle_mask:$sm)),
2904 (SHUFPDrri VR128:$src1, VR128:$src2, SHUFP_shuffle_mask:$sm)>,
2905 Requires<[HasSSE2]>;
2906 // Special unary SHUFPDrri case.
2907 def : Pat<(v2i64 (vector_shuffle VR128:$src1, (undef),
2908 SHUFP_unary_shuffle_mask:$sm)),
2909 (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
2910 Requires<[HasSSE2]>;
2912 // vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
2913 let AddedComplexity = 10 in {
2914 def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
2915 UNPCKL_v_undef_shuffle_mask)),
2916 (UNPCKLPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2917 def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
2918 UNPCKL_v_undef_shuffle_mask)),
2919 (PUNPCKLBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2920 def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
2921 UNPCKL_v_undef_shuffle_mask)),
2922 (PUNPCKLWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2923 def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2924 UNPCKL_v_undef_shuffle_mask)),
2925 (PUNPCKLDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2926 }
2928 // vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
2929 let AddedComplexity = 10 in {
2930 def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
2931 UNPCKH_v_undef_shuffle_mask)),
2932 (UNPCKHPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2933 def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
2934 UNPCKH_v_undef_shuffle_mask)),
2935 (PUNPCKHBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2936 def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
2937 UNPCKH_v_undef_shuffle_mask)),
2938 (PUNPCKHWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2939 def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2940 UNPCKH_v_undef_shuffle_mask)),
2941 (PUNPCKHDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2942 }
2944 let AddedComplexity = 15 in {
2945 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
2946 def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2947 MOVHP_shuffle_mask)),
2948 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
2950 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
2951 def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2952 MOVHLPS_shuffle_mask)),
2953 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
2955 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
2956 def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
2957 MOVHLPS_v_undef_shuffle_mask)),
2958 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
2959 def : Pat<(v4i32 (vector_shuffle VR128:$src1, (undef),
2960 MOVHLPS_v_undef_shuffle_mask)),
2961 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
2962 }
2964 let AddedComplexity = 20 in {
2965 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
2966 // vector_shuffle v1, (load v2) <0, 1, 4, 5> using MOVHPS
2967 def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memop addr:$src2),
2968 MOVLP_shuffle_mask)),
2969 (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
2970 def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memop addr:$src2),
2971 MOVLP_shuffle_mask)),
2972 (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
2973 def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memop addr:$src2),
2974 MOVHP_shuffle_mask)),
2975 (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
2976 def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memop addr:$src2),
2977 MOVHP_shuffle_mask)),
2978 (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
2980 def : Pat<(v4i32 (vector_shuffle VR128:$src1,
2981 (bc_v4i32 (memopv2i64 addr:$src2)),
2982 MOVLP_shuffle_mask)),
2983 (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
2984 def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memop addr:$src2),
2985 MOVLP_shuffle_mask)),
2986 (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
2987 def : Pat<(v4i32 (vector_shuffle VR128:$src1,
2988 (bc_v4i32 (memopv2i64 addr:$src2)),
2989 MOVHP_shuffle_mask)),
2990 (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
2991 def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memop addr:$src2),
2992 MOVHP_shuffle_mask)),
2993 (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
2994 }
2996 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
2997 // (store (vector_shuffle (load addr), v2, <0, 1, 4, 5>), addr) using MOVHPS
2998 def : Pat<(store (v4f32 (vector_shuffle (memop addr:$src1), VR128:$src2,
2999 MOVLP_shuffle_mask)), addr:$src1),
3000 (MOVLPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
3001 def : Pat<(store (v2f64 (vector_shuffle (memop addr:$src1), VR128:$src2,
3002 MOVLP_shuffle_mask)), addr:$src1),
3003 (MOVLPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3004 def : Pat<(store (v4f32 (vector_shuffle (memop addr:$src1), VR128:$src2,
3005 MOVHP_shuffle_mask)), addr:$src1),
3006 (MOVHPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
3007 def : Pat<(store (v2f64 (vector_shuffle (memop addr:$src1), VR128:$src2,
3008 MOVHP_shuffle_mask)), addr:$src1),
3009 (MOVHPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3011 def : Pat<(store (v4i32 (vector_shuffle
3012 (bc_v4i32 (memopv2i64 addr:$src1)), VR128:$src2,
3013 MOVLP_shuffle_mask)), addr:$src1),
3014 (MOVLPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
3015 def : Pat<(store (v2i64 (vector_shuffle (memop addr:$src1), VR128:$src2,
3016 MOVLP_shuffle_mask)), addr:$src1),
3017 (MOVLPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3018 def : Pat<(store (v4i32 (vector_shuffle
3019 (bc_v4i32 (memopv2i64 addr:$src1)), VR128:$src2,
3020 MOVHP_shuffle_mask)), addr:$src1),
3021 (MOVHPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
3022 def : Pat<(store (v2i64 (vector_shuffle (memop addr:$src1), VR128:$src2,
3023 MOVHP_shuffle_mask)), addr:$src1),
3024 (MOVHPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3027 let AddedComplexity = 15 in {
3028 // Setting the lowest element in the vector.
3029 def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
3030 MOVL_shuffle_mask)),
3031 (MOVLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3032 def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
3033 MOVL_shuffle_mask)),
3034 (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3036 // vector_shuffle v1, v2 <4, 5, 2, 3> using MOVLPDrr (movsd)
3037 def : Pat<(v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
3038 MOVLP_shuffle_mask)),
3039 (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3040 def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
3041 MOVLP_shuffle_mask)),
3042 (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3043 }
3045 // Set lowest element and zero upper elements.
3046 let AddedComplexity = 15 in
3047 def : Pat<(v2f64 (vector_shuffle immAllZerosV_bc, VR128:$src,
3048 MOVL_shuffle_mask)),
3049 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
3050 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
3051 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
3053 // FIXME: Temporary workaround since 2-wide shuffle is broken.
3054 def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, VR128:$src2),
3055 (v2i64 (PUNPCKHQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
3056 def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, (memop addr:$src2)),
3057 (v2i64 (PUNPCKHQDQrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
3058 def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, VR128:$src2),
3059 (v2i64 (PUNPCKLQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
3060 def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, (memop addr:$src2)),
3061 (PUNPCKLQDQrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3063 // Some special case pandn patterns.
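// There is no target-independent 'andn' node, so (and (not x), y) reaches
// instruction selection as (and (xor x, immAllOnesV), y). The all-ones
// vector may be bitcast from any integer element type, hence one pattern
// per element type below.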
3064 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3065 VR128:$src2)),
3066 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3067 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3068 VR128:$src2)),
3069 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3070 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3071 VR128:$src2)),
3072 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3074 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3075 (memop addr:$src2))),
3076 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3077 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3078 (memop addr:$src2))),
3079 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3080 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3081 (memop addr:$src2))),
3082 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3084 // vector -> vector casts
3085 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
3086 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
3087 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
3088 (Int_CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
3090 // Use movaps / movups for SSE integer load / store (one byte shorter).
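// movaps/movups lack the 0x66 operand-size prefix that movdqa/movdqu carry,
// so the FP forms encode one byte shorter for the same 128-bit move.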
3091 def : Pat<(alignedloadv4i32 addr:$src),
3092 (MOVAPSrm addr:$src)>, Requires<[HasSSE1]>;
3093 def : Pat<(loadv4i32 addr:$src),
3094 (MOVUPSrm addr:$src)>, Requires<[HasSSE1]>;
3095 def : Pat<(alignedloadv2i64 addr:$src),
3096 (MOVAPSrm addr:$src)>, Requires<[HasSSE2]>;
3097 def : Pat<(loadv2i64 addr:$src),
3098 (MOVUPSrm addr:$src)>, Requires<[HasSSE2]>;
3100 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3101 (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3102 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3103 (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3104 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3105 (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3106 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3107 (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3108 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3109 (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3110 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3111 (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3112 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3113 (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3114 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3115 (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3117 //===----------------------------------------------------------------------===//
3118 // SSE4.1 Instructions
3119 //===----------------------------------------------------------------------===//
3121 multiclass sse41_fp_unop_rm<bits<8> opcss, bits<8> opcps,
3122 bits<8> opcsd, bits<8> opcpd,
3123 string OpcodeStr,
3124 Intrinsic F32Int,
3125 Intrinsic V4F32Int,
3126 Intrinsic F64Int,
3127 Intrinsic V2F64Int> {
3128 // Intrinsic operation, reg.
3129 def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
3130 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3131 !strconcat(OpcodeStr,
3132 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3133 [(set VR128:$dst, (F32Int VR128:$src1, imm:$src2))]>,
3134 OpSize;
3136 // Intrinsic operation, mem.
3137 def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
3138 (outs VR128:$dst), (ins ssmem:$src1, i32i8imm:$src2),
3139 !strconcat(OpcodeStr,
3140 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3141 [(set VR128:$dst, (F32Int sse_load_f32:$src1, imm:$src2))]>,
3142 OpSize;
3144 // Vector intrinsic operation, reg
3145 def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
3146 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3147 !strconcat(OpcodeStr,
3148 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3149 [(set VR128:$dst, (V4F32Int VR128:$src1, imm:$src2))]>,
3150 OpSize;
3152 // Vector intrinsic operation, mem
3153 def PSm_Int : SS4AIi8<opcps, MRMSrcMem,
3154 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
3155 !strconcat(OpcodeStr,
3156 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3157 [(set VR128:$dst,
3158 (V4F32Int (memopv4f32 addr:$src1),imm:$src2))]>,
3159 OpSize;
3161 // Intrinsic operation, reg.
3162 def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
3163 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3164 !strconcat(OpcodeStr,
3165 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3166 [(set VR128:$dst, (F64Int VR128:$src1, imm:$src2))]>,
3167 OpSize;
3169 // Intrinsic operation, mem.
3170 def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
3171 (outs VR128:$dst), (ins sdmem:$src1, i32i8imm:$src2),
3172 !strconcat(OpcodeStr,
3173 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3174 [(set VR128:$dst, (F64Int sse_load_f64:$src1, imm:$src2))]>,
3175 OpSize;
3177 // Vector intrinsic operation, reg
3178 def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
3179 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3180 !strconcat(OpcodeStr,
3181 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3182 [(set VR128:$dst, (V2F64Int VR128:$src1, imm:$src2))]>,
3183 OpSize;
3185 // Vector intrinsic operation, mem
3186 def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
3187 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
3188 !strconcat(OpcodeStr,
3189 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3190 [(set VR128:$dst,
3191 (V2F64Int (memopv2f64 addr:$src1),imm:$src2))]>,
3192 OpSize;
3193 }
3195 // FP round - roundss, roundps, roundsd, roundpd
3196 defm ROUND : sse41_fp_unop_rm<0x0A, 0x08, 0x0B, 0x09, "round",
3197 int_x86_sse41_round_ss, int_x86_sse41_round_ps,
3198 int_x86_sse41_round_sd, int_x86_sse41_round_pd>;
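// The defm prepends ROUND to each def name in sse41_fp_unop_rm, producing
// e.g. ROUNDSSr_Int/ROUNDSSm_Int for the scalar-single forms through
// ROUNDPDr_Int/ROUNDPDm_Int for the packed-double forms.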
3200 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
3201 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
3202 Intrinsic IntId128> {
3203 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
3204 (ins VR128:$src),
3205 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3206 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
3207 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
3208 (ins i128mem:$src),
3209 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3210 [(set VR128:$dst,
3211 (IntId128
3212 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
3213 }
3215 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
3216 int_x86_sse41_phminposuw>;
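// phminposuw scans the eight unsigned words of the source, writing the
// minimum value to bits [15:0] of the destination, its index to bits
// [18:16], and zeroing the remaining bits.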
3218 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
3219 let Constraints = "$src1 = $dst" in {
3220 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
3221 Intrinsic IntId128, bit Commutable = 0> {
3222 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
3223 (ins VR128:$src1, VR128:$src2),
3224 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3225 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3226 OpSize {
3227 let isCommutable = Commutable;
3228 }
3229 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
3230 (ins VR128:$src1, i128mem:$src2),
3231 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3232 [(set VR128:$dst,
3233 (IntId128 VR128:$src1,
3234 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
3235 }
3236 }
3238 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq",
3239 int_x86_sse41_pcmpeqq, 1>;
3240 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw",
3241 int_x86_sse41_packusdw, 0>;
3242 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb",
3243 int_x86_sse41_pminsb, 1>;
3244 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd",
3245 int_x86_sse41_pminsd, 1>;
3246 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud",
3247 int_x86_sse41_pminud, 1>;
3248 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw",
3249 int_x86_sse41_pminuw, 1>;
3250 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb",
3251 int_x86_sse41_pmaxsb, 1>;
3252 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd",
3253 int_x86_sse41_pmaxsd, 1>;
3254 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud",
3255 int_x86_sse41_pmaxud, 1>;
3256 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw",
3257 int_x86_sse41_pmaxuw, 1>;
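// Together with the SSE2 pminub/pmaxub and pminsw/pmaxsw instructions, these
// complete the signed/unsigned min/max matrix for byte, word and dword
// elements.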
3260 /// SS41I_binop_patint - SSE 4.1 binary operator with both a generic pattern and an intrinsic form
3261 let Constraints = "$src1 = $dst" in {
3262 multiclass SS41I_binop_patint<bits<8> opc, string OpcodeStr, ValueType OpVT,
3263 SDNode OpNode, Intrinsic IntId128,
3264 bit Commutable = 0> {
3265 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
3266 (ins VR128:$src1, VR128:$src2),
3267 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3268 [(set VR128:$dst, (OpNode (OpVT VR128:$src1),
3269 VR128:$src2))]>, OpSize {
3270 let isCommutable = Commutable;
3271 }
3272 def rr_int : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
3273 (ins VR128:$src1, VR128:$src2),
3274 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3275 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3276 OpSize {
3277 let isCommutable = Commutable;
3278 }
3279 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
3280 (ins VR128:$src1, i128mem:$src2),
3281 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3282 [(set VR128:$dst,
3283 (OpNode VR128:$src1, (memop addr:$src2)))]>, OpSize;
3284 def rm_int : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
3285 (ins VR128:$src1, i128mem:$src2),
3286 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3287 [(set VR128:$dst,
3288 (IntId128 VR128:$src1, (memop addr:$src2)))]>,
3289 OpSize;
3290 }
3291 }
3292 defm PMULLD : SS41I_binop_patint<0x40, "pmulld", v4i32, mul,
3293 int_x86_sse41_pmulld, 1>;
3294 defm PMULDQ : SS41I_binop_patint<0x28, "pmuldq", v2i64, mul,
3295 int_x86_sse41_pmuldq, 1>;
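// pmulld is a full 32x32->32 multiply of all four dword lanes; pmuldq
// multiplies the even-numbered dword elements (0 and 2) of each operand as
// signed values, producing two 64-bit products.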
3298 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
3299 let Constraints = "$src1 = $dst" in {
3300 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
3301 Intrinsic IntId128, bit Commutable = 0> {
3302 def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
3303 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
3304 !strconcat(OpcodeStr,
3305 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3306 [(set VR128:$dst,
3307 (IntId128 VR128:$src1, VR128:$src2, imm:$src3))]>,
3308 OpSize {
3309 let isCommutable = Commutable;
3310 }
3311 def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
3312 (ins VR128:$src1, i128mem:$src2, i32i8imm:$src3),
3313 !strconcat(OpcodeStr,
3314 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3315 [(set VR128:$dst,
3316 (IntId128 VR128:$src1,
3317 (bitconvert (memopv16i8 addr:$src2)), imm:$src3))]>,
3318 OpSize;
3319 }
3320 }
3322 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps",
3323 int_x86_sse41_blendps, 0>;
3324 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd",
3325 int_x86_sse41_blendpd, 0>;
3326 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw",
3327 int_x86_sse41_pblendw, 0>;
3328 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps",
3329 int_x86_sse41_dpps, 1>;
3330 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd",
3331 int_x86_sse41_dppd, 1>;
3332 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw",
3333 int_x86_sse41_mpsadbw, 0>;
3336 /// SS41I_ternary_int - SSE 4.1 ternary operator
3337 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
3338 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3339 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
3340 (ins VR128:$src1, VR128:$src2),
3341 !strconcat(OpcodeStr,
3342 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
3343 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
3344 OpSize;
3346 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
3347 (ins VR128:$src1, i128mem:$src2),
3348 !strconcat(OpcodeStr,
3349 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
3350 [(set VR128:$dst,
3351 (IntId VR128:$src1,
3352 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
3353 }
3354 }
3356 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
3357 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
3358 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
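// The blendv forms read their selection mask implicitly from XMM0 (hence
// Uses = [XMM0] above and the hard-coded %xmm0 in the assembly string); the
// mask must be copied into XMM0 before these instructions execute.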
3361 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3362 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3363 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3364 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3366 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3367 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3368 [(set VR128:$dst,
3369 (IntId (bitconvert (v4i32 (load addr:$src)))))]>, OpSize;
3370 }
3372 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
3373 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
3374 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
3375 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
3376 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
3377 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
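// The pmovsx/pmovzx bw, wd and dq forms widen the low eight bytes, four
// words or two dwords of the source to words, dwords or qwords; the memory
// forms read only 64 bits, which is why they take i64mem.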
3379 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3380 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3381 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3382 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3384 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3385 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3386 [(set VR128:$dst,
3387 (IntId (bitconvert (v4i32 (load addr:$src)))))]>, OpSize;
3388 }
3390 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
3391 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
3392 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
3393 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
3395 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3396 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3397 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3398 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3400 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
3401 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3402 [(set VR128:$dst,
3403 (IntId (bitconvert (v4i32 (load addr:$src)))))]>, OpSize;
3404 }
3406 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
3407 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
3410 /// SS41I_extract8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
3411 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
3412 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
3413 (ins VR128:$src1, i32i8imm:$src2),
3414 !strconcat(OpcodeStr,
3415 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3416 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
3417 OpSize;
3418 def mr : SS4AIi8<opc, MRMDestMem, (outs),
3419 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
3420 !strconcat(OpcodeStr,
3421 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3424 // There's an AssertZext in the way of writing the store pattern
3425 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
3428 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
3431 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
3432 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
3433 def mr : SS4AIi8<opc, MRMDestMem, (outs),
3434 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
3435 !strconcat(OpcodeStr,
3436 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3439 // There's an AssertZext in the way of writing the store pattern
3440 // (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
3443 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
3446 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
3447 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
3448 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
3449 (ins VR128:$src1, i32i8imm:$src2),
3450 !strconcat(OpcodeStr,
3451 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3452 [(set GR32:$dst,
3453 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
3454 def mr : SS4AIi8<opc, MRMDestMem, (outs),
3455 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
3456 !strconcat(OpcodeStr,
3457 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3458 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
3459 addr:$dst)]>, OpSize;
3460 }
3462 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
3465 /// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
3466 /// destination
3467 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
3468 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
3469 (ins VR128:$src1, i32i8imm:$src2),
3470 !strconcat(OpcodeStr,
3471 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3472 [(set GR32:$dst,
3473 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
3474 OpSize;
3475 def mr : SS4AIi8<opc, MRMDestMem, (outs),
3476 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
3477 !strconcat(OpcodeStr,
3478 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3479 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
3480 addr:$dst)]>, OpSize;
3481 }
3483 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
3485 let Constraints = "$src1 = $dst" in {
3486 multiclass SS41I_insert8<bits<8> opc, string OpcodeStr> {
3487 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
3488 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
3489 !strconcat(OpcodeStr,
3490 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3491 [(set VR128:$dst,
3492 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
3493 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
3494 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
3495 !strconcat(OpcodeStr,
3496 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3497 [(set VR128:$dst,
3498 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
3499 imm:$src3))]>, OpSize;
3500 }
3501 }
3503 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
3505 let Constraints = "$src1 = $dst" in {
3506 multiclass SS41I_insert32<bits<8> opc, string OpcodeStr> {
3507 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
3508 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
3509 !strconcat(OpcodeStr,
3510 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3511 [(set VR128:$dst,
3512 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
3513 OpSize;
3514 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
3515 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
3516 !strconcat(OpcodeStr,
3517 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3518 [(set VR128:$dst,
3519 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
3520 imm:$src3)))]>, OpSize;
3521 }
3522 }
3524 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
3526 let Constraints = "$src1 = $dst" in {
3527 multiclass SS41I_insertf32<bits<8> opc, string OpcodeStr> {
3528 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
3529 (ins VR128:$src1, FR32:$src2, i32i8imm:$src3),
3530 !strconcat(OpcodeStr,
3531 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3532 [(set VR128:$dst,
3533 (X86insrtps VR128:$src1, FR32:$src2, imm:$src3))]>, OpSize;
3534 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
3535 (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
3536 !strconcat(OpcodeStr,
3537 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3538 [(set VR128:$dst,
3539 (X86insrtps VR128:$src1, (loadf32 addr:$src2),
3540 imm:$src3))]>, OpSize;
3541 }
3542 }
3544 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
3546 let Defs = [EFLAGS] in {
3547 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
3548 "ptest \t{$src2, $src1|$src1, $src2}", []>, OpSize;
3549 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
3550 "ptest \t{$src2, $src1|$src1, $src2}", []>, OpSize;
3553 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3554 "movntdqa\t{$src, $dst|$dst, $src}",
3555 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
3556 OpSize;
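// movntdqa is a non-temporal (streaming) aligned 128-bit load, hinting that
// the data should bypass the normal cache hierarchy where possible.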