//====- X86InstrSSE.td - Describe the X86 Instruction Set -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Evan Cheng and is distributed under the University
// of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;

def X86fmin    : SDNode<"X86ISD::FMIN",      SDTFPBinOp>;
def X86fmax    : SDNode<"X86ISD::FMAX",      SDTFPBinOp>;
def X86fand    : SDNode<"X86ISD::FAND",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86for     : SDNode<"X86ISD::FOR",       SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fxor    : SDNode<"X86ISD::FXOR",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt  : SDNode<"X86ISD::FRSQRT",    SDTFPUnaryOp>;
def X86frcp    : SDNode<"X86ISD::FRCP",      SDTFPUnaryOp>;
def X86fsrl    : SDNode<"X86ISD::FSRL",      SDTX86FPShiftOp>;
def X86comi    : SDNode<"X86ISD::COMI",      SDTX86CmpTest>;
def X86ucomi   : SDNode<"X86ISD::UCOMI",     SDTX86CmpTest>;
def X86s2vec   : SDNode<"X86ISD::S2VEC",     SDTypeProfile<1, 1, []>, []>;
def X86pextrw  : SDNode<"X86ISD::PEXTRW",    SDTypeProfile<1, 2, []>, []>;
def X86pinsrw  : SDNode<"X86ISD::PINSRW",    SDTypeProfile<1, 3, []>, []>;

//===----------------------------------------------------------------------===//
// SSE 'Special' Instructions
//===----------------------------------------------------------------------===//

def IMPLICIT_DEF_VR128 : I<0, Pseudo, (outs VR128:$dst), (ins),
                           "#IMPLICIT_DEF $dst",
                           [(set VR128:$dst, (v4f32 (undef)))]>,
                         Requires<[HasSSE1]>;
def IMPLICIT_DEF_FR32  : I<0, Pseudo, (outs FR32:$dst), (ins),
                           "#IMPLICIT_DEF $dst",
                           [(set FR32:$dst, (undef))]>, Requires<[HasSSE1]>;
def IMPLICIT_DEF_FR64  : I<0, Pseudo, (outs FR64:$dst), (ins),
                           "#IMPLICIT_DEF $dst",
                           [(set FR64:$dst, (undef))]>, Requires<[HasSSE2]>;

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements.  These are used for the SSE 'ss' and 'sd' instruction
// classes.
def sse_load_f32 : ComplexPattern<v4f32, 4, "SelectScalarSSELoad", [],
                                  [SDNPHasChain]>;
def sse_load_f64 : ComplexPattern<v2f64, 4, "SelectScalarSSELoad", [],
                                  [SDNPHasChain]>;
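
// As a sketch of what the selector can fold through these patterns (the
// actual logic lives in SelectScalarSSELoad): an intrinsic pattern such as
// (int_x86_sse_add_ss VR128:$src1, sse_load_f32:$src2) lets ADDSSrm_Int
// below match an operand like
//   (v4f32 (scalar_to_vector (loadf32 addr)))
// folding the scalar load into the instruction's memory operand.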

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// Like 'store', but always requires vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;
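
// For example, MOVAPSmr below stores through this fragment, so movaps is
// only selected for stores known to be 16-byte aligned, while MOVUPSmr uses
// the plain 'store' fragment and covers the unaligned case.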

// Like 'load', but always requires vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 16;
  return false;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr), (f32   (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr), (f64   (alignedload node:$ptr))>;
def alignedloadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (alignedload node:$ptr))>;
def alignedloadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (alignedload node:$ptr))>;

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others.
// FIXME: Actually implement support for targets that don't require the
// alignment. This probably wants a subtarget predicate.
def memop : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 16;
  return false;
}]>;

def memopfsf32 : PatFrag<(ops node:$ptr), (f32   (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64   (memop node:$ptr))>;
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
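
// These fragments are what the folded-load instruction forms below use;
// e.g. ADDPSrm matches (fadd VR128:$src1, (memopv4f32 addr:$src2)), so an
// addps with a memory operand is only formed when the 16-byte alignment
// check above passes.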

// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
def memop64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 8;
  return false;
}]>;

def memopv8i8  : PatFrag<(ops node:$ptr), (v8i8  (memop64 node:$ptr))>;
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop64 node:$ptr))>;
def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;

def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

def PSxLDQ_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getValue() >> 3);
}]>;
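
// The >> 3 converts a shift amount expressed in bits into the byte count
// that the PSLLDQ/PSRLDQ encodings expect: e.g. a 64-bit shift becomes the
// immediate 8.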

// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;
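
// The resulting immediate packs four 2-bit element selectors, low element
// first; for instance, a reversal mask of <3,2,1,0> encodes as 0b00011011
// (0x1B), and a splat of element 0, <0,0,0,0>, encodes as 0x00.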

// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;

// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;

def SSE_splat_mask : PatLeaf<(build_vector), [{
  return X86::isSplatMask(N);
}], SHUFFLE_get_shuf_imm>;

def SSE_splat_lo_mask : PatLeaf<(build_vector), [{
  return X86::isSplatLoMask(N);
}]>;

def MOVHLPS_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHLPSMask(N);
}]>;

def MOVHLPS_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHLPS_v_undef_Mask(N);
}]>;

def MOVHP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHPMask(N);
}]>;

def MOVLP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVLPMask(N);
}]>;

def MOVL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVLMask(N);
}]>;

def MOVSHDUP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVSHDUPMask(N);
}]>;

def MOVSLDUP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVSLDUPMask(N);
}]>;

def UNPCKL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKLMask(N);
}]>;

def UNPCKH_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKHMask(N);
}]>;

def UNPCKL_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKL_v_undef_Mask(N);
}]>;

def UNPCKH_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKH_v_undef_Mask(N);
}]>;

def PSHUFD_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFDMask(N);
}], SHUFFLE_get_shuf_imm>;

def PSHUFHW_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFHWMask(N);
}], SHUFFLE_get_pshufhw_imm>;

def PSHUFLW_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFLWMask(N);
}], SHUFFLE_get_pshuflw_imm>;

def SHUFP_unary_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFDMask(N);
}], SHUFFLE_get_shuf_imm>;

def SHUFP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isSHUFPMask(N);
}], SHUFFLE_get_shuf_imm>;

def PSHUFD_binary_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isSHUFPMask(N);
}], SHUFFLE_get_shuf_imm>;

//===----------------------------------------------------------------------===//
// SSE scalar FP Instructions
//===----------------------------------------------------------------------===//

// CMOV* - Used to implement the SSE SELECT DAG operation.  These are expanded
// by the scheduler into a branch sequence.
let Uses = [EFLAGS], usesCustomDAGSchedInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V4F32 PSEUDO!",
                    [(set VR128:$dst,
                      (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2F64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2I64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
}
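
// A sketch of the expansion the custom inserter performs (the actual code
// lives in X86TargetLowering; block names here are illustrative): for
//   %dst = CMOV_FR32 %t, %f, cond
// it emits a conditional jump on $cond (reading EFLAGS) around a copy block
// and merges the two values with a PHI:
//   thisMBB:  jCC sinkMBB
//   copy0MBB: (fall through)
//   sinkMBB:  %dst = PHI [ %f, copy0MBB ], [ %t, thisMBB ]
// so no SSE instruction ever needs a predicated form.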

//===----------------------------------------------------------------------===//
// SSE1 Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
def MOVSSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}", []>;
let isLoad = 1, isReMaterializable = 1 in
def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(set FR32:$dst, (loadf32 addr:$src))]>;
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>;

// Conversion instructions
def CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def CVTSI2SSrr  : SSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                      "cvtsi2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (sint_to_fp GR32:$src))]>;
def CVTSI2SSrm  : SSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                      "cvtsi2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>;

// Match intrinsics which expect XMM operand(s).
def Int_CVTSS2SIrr : SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                         "cvtss2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>;
def Int_CVTSS2SIrm : SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                         "cvtss2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse_cvtss2si
                                           (load addr:$src)))]>;

// Aliases for intrinsics
def Int_CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si VR128:$src))]>;
def Int_CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si (load addr:$src)))]>;

let isTwoAddress = 1 in {
  def Int_CVTSI2SSrr : SSI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
                           "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                              GR32:$src2))]>;
  def Int_CVTSI2SSrm : SSI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
                           "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                              (loadi32 addr:$src2)))]>;
}

// Comparison instructions
let isTwoAddress = 1 in {
  def CMPSSrr : SSI<0xC2, MRMSrcReg,
                    (outs FR32:$dst), (ins FR32:$src1, FR32:$src, SSECC:$cc),
                    "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
  def CMPSSrm : SSI<0xC2, MRMSrcMem,
                    (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, SSECC:$cc),
                    "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
}

let Defs = [EFLAGS] in {
def UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2),
                   "ucomiss\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR32:$src1, FR32:$src2), (implicit EFLAGS)]>;
def UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2),
                   "ucomiss\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR32:$src1, (loadf32 addr:$src2)),
                    (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Aliases to match intrinsics which expect XMM operand(s).
let isTwoAddress = 1 in {
  def Int_CMPSSrr : SSI<0xC2, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                        "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
                                           VR128:$src, imm:$cc))]>;
  def Int_CMPSSrm : SSI<0xC2, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, f32mem:$src, SSECC:$cc),
                        "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
                                           (load addr:$src), imm:$cc))]>;
}

let Defs = [EFLAGS] in {
def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs),
                       (ins VR128:$src1, VR128:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v4f32 VR128:$src1), VR128:$src2),
                        (implicit EFLAGS)]>;
def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs),
                       (ins VR128:$src1, f128mem:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v4f32 VR128:$src1), (load addr:$src2)),
                        (implicit EFLAGS)]>;

def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs),
                      (ins VR128:$src1, VR128:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v4f32 VR128:$src1), VR128:$src2),
                       (implicit EFLAGS)]>;
def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs),
                      (ins VR128:$src1, f128mem:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v4f32 VR128:$src1), (load addr:$src2)),
                       (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Aliases of packed SSE1 instructions for scalar use. These all have names that
// start with 'Fs'.

// Alias instructions that map fld0 to pxor for sse.
let isReMaterializable = 1 in
def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins),
                 "pxor\t$dst, $dst", [(set FR32:$dst, fp32imm0)]>,
               Requires<[HasSSE1]>, TB, OpSize;

// Alias instruction to do FR32 reg-to-reg copy using movaps. Upper bits are
// disregarded.
def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                     "movaps\t{$src, $dst|$dst, $src}", []>;

// Alias instruction to load FR32 from f128mem using movaps. Upper bits are
// disregarded.
def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;

// Alias bitwise logical operations using SSE logical ops on packed FP values.
let isTwoAddress = 1 in {
let isCommutable = 1 in {
  def FsANDPSrr : PSI<0x54, MRMSrcReg, (outs FR32:$dst),
                      (ins FR32:$src1, FR32:$src2),
                      "andps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86fand FR32:$src1, FR32:$src2))]>;
  def FsORPSrr  : PSI<0x56, MRMSrcReg, (outs FR32:$dst),
                      (ins FR32:$src1, FR32:$src2),
                      "orps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86for FR32:$src1, FR32:$src2))]>;
  def FsXORPSrr : PSI<0x57, MRMSrcReg, (outs FR32:$dst),
                      (ins FR32:$src1, FR32:$src2),
                      "xorps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86fxor FR32:$src1, FR32:$src2))]>;
}

def FsANDPSrm : PSI<0x54, MRMSrcMem, (outs FR32:$dst),
                    (ins FR32:$src1, f128mem:$src2),
                    "andps\t{$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86fand FR32:$src1,
                                      (memopfsf32 addr:$src2)))]>;
def FsORPSrm  : PSI<0x56, MRMSrcMem, (outs FR32:$dst),
                    (ins FR32:$src1, f128mem:$src2),
                    "orps\t{$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86for FR32:$src1,
                                      (memopfsf32 addr:$src2)))]>;
def FsXORPSrm : PSI<0x57, MRMSrcMem, (outs FR32:$dst),
                    (ins FR32:$src1, f128mem:$src2),
                    "xorps\t{$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86fxor FR32:$src1,
                                      (memopfsf32 addr:$src2)))]>;

def FsANDNPSrr : PSI<0x55, MRMSrcReg,
                     (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}", []>;
def FsANDNPSrm : PSI<0x55, MRMSrcMem,
                     (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}", []>;
}

/// basic_sse1_fp_binop_rm - SSE1 binops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation.  This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements undefined.
///
/// These three forms can each be reg+reg or reg+mem, so there are a total of
/// six "instructions".
///
let isTwoAddress = 1 in {
multiclass basic_sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
                                  SDNode OpNode, Intrinsic F32Int,
                                  bit Commutable = 0> {
  // Scalar operation, reg+reg.
  def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1,
                                        sse_load_f32:$src2))]>;
}
}

// Arithmetic instructions
defm ADD : basic_sse1_fp_binop_rm<0x58, "add", fadd, int_x86_sse_add_ss, 1>;
defm MUL : basic_sse1_fp_binop_rm<0x59, "mul", fmul, int_x86_sse_mul_ss, 1>;
defm SUB : basic_sse1_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse_sub_ss>;
defm DIV : basic_sse1_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse_div_ss>;
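
// As an illustration of how these expand: TableGen prefixes each def in the
// multiclass with the defm name, so "defm ADD" yields ADDSSrr, ADDSSrm,
// ADDPSrr, ADDPSrm, ADDSSrr_Int and ADDSSrm_Int; e.g. the first is
// equivalent to writing
//   def ADDSSrr : SSI<0x58, MRMSrcReg, (outs FR32:$dst),
//                     (ins FR32:$src1, FR32:$src2),
//                     "addss\t{$src2, $dst|$dst, $src2}",
//                     [(set FR32:$dst, (fadd FR32:$src1, FR32:$src2))]>;
// by hand (plus "let isCommutable = 1", since the defm passes Commutable = 1).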

/// sse1_fp_binop_rm - Other SSE1 binops
///
/// This multiclass is like basic_sse1_fp_binop_rm, with the addition of
/// instructions for a full-vector intrinsic form.  Operations that map
/// onto C operators don't use this form since they just use the plain
/// vector form instead of having a separate vector intrinsic form.
///
/// This provides a total of eight "instructions".
///
let isTwoAddress = 1 in {
multiclass sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
                            SDNode OpNode,
                            Intrinsic F32Int,
                            Intrinsic V4F32Int,
                            bit Commutable = 0> {
  // Scalar operation, reg+reg.
  def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1,
                                        sse_load_f32:$src2))]>;

  // Vector intrinsic operation, reg+reg.
  def PSrr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V4F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, reg+mem.
  def PSrm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V4F32Int VR128:$src1, (load addr:$src2)))]>;
}
}

defm MAX : sse1_fp_binop_rm<0x5F, "max", X86fmax,
                            int_x86_sse_max_ss, int_x86_sse_max_ps>;
defm MIN : sse1_fp_binop_rm<0x5D, "min", X86fmin,
                            int_x86_sse_min_ss, int_x86_sse_min_ps>;
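
// These expand to the six forms above plus the two full-vector intrinsic
// forms, e.g. MAXPSrr_Int, which matches
//   (set VR128:$dst, (int_x86_sse_max_ps VR128:$src1, VR128:$src2))
// directly; min/max don't map onto a single C operator, so the packed
// intrinsic needs its own pattern here.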

//===----------------------------------------------------------------------===//
// SSE packed FP Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
def MOVAPSrr : PSI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}", []>;
let isLoad = 1, isReMaterializable = 1 in
def MOVAPSrm : PSI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (alignedloadv4f32 addr:$src))]>;

def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;

def MOVUPSrr : PSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}", []>;
def MOVUPSrm : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (loadv4f32 addr:$src))]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;

// Intrinsic forms of MOVUPS load and store
def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;

let isTwoAddress = 1 in {
  let AddedComplexity = 20 in {
    def MOVLPSrm : PSI<0x12, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                       "movlps\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v4f32 (vector_shuffle VR128:$src1,
                                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
                                 MOVLP_shuffle_mask)))]>;
    def MOVHPSrm : PSI<0x16, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                       "movhps\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v4f32 (vector_shuffle VR128:$src1,
                                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
                                 MOVHP_shuffle_mask)))]>;
  } // AddedComplexity
} // isTwoAddress

def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (vector_shuffle
                                         (bc_v2f64 (v4f32 VR128:$src)), (undef),
                                         UNPCKH_shuffle_mask)), (iPTR 0))),
                           addr:$dst)]>;

let isTwoAddress = 1 in {
  let AddedComplexity = 15 in {
    def MOVLHPSrr : PSI<0x16, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "movlhps\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                                  MOVHP_shuffle_mask)))]>;
    def MOVHLPSrr : PSI<0x12, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "movhlps\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                                  MOVHLPS_shuffle_mask)))]>;
  } // AddedComplexity
} // isTwoAddress

/// sse1_fp_unop_rm - SSE1 unops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation.  This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.
///
/// These four forms can each have a reg or a mem operand, so there are a
/// total of eight "instructions".
///
multiclass sse1_fp_unop_rm<bits<8> opc, string OpcodeStr,
                           SDNode OpNode,
                           Intrinsic F32Int,
                           Intrinsic V4F32Int,
                           bit Commutable = 0> {
  // Scalar operation, reg.
  def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode FR32:$src))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, mem.
  def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode (load addr:$src)))]>;

  // Vector operation, reg.
  def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, mem.
  def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;

  // Intrinsic operation, reg.
  def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, mem.
  def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;

  // Vector intrinsic operation, reg.
  def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, mem.
  def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int (load addr:$src)))]>;
}

// Square root.
defm SQRT : sse1_fp_unop_rm<0x51, "sqrt", fsqrt,
                            int_x86_sse_sqrt_ss, int_x86_sse_sqrt_ps>;
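
// This single defm therefore yields SQRTSSr, SQRTSSm, SQRTPSr, SQRTPSm,
// SQRTSSr_Int, SQRTSSm_Int, SQRTPSr_Int and SQRTPSm_Int.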

// Reciprocal approximations. Note that these typically require refinement
// in order to obtain suitable precision.
defm RSQRT : sse1_fp_unop_rm<0x52, "rsqrt", X86frsqrt,
                             int_x86_sse_rsqrt_ss, int_x86_sse_rsqrt_ps>;
defm RCP   : sse1_fp_unop_rm<0x53, "rcp", X86frcp,
                             int_x86_sse_rcp_ss, int_x86_sse_rcp_ps>;
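
// The refinement alluded to above is typically one Newton-Raphson step
// performed by whatever emits these nodes, not by anything in this file;
// for a reciprocal estimate x0 of 1/a, the usual iteration is
//   x1 = x0 * (2 - a * x0)
// which roughly doubles the ~12 bits of precision the hardware estimate
// provides.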

// Logical
let isTwoAddress = 1 in {
  let isCommutable = 1 in {
    def ANDPSrr : PSI<0x54, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "andps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (v2i64
                                         (and VR128:$src1, VR128:$src2)))]>;
    def ORPSrr  : PSI<0x56, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "orps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (v2i64
                                         (or VR128:$src1, VR128:$src2)))]>;
    def XORPSrr : PSI<0x57, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "xorps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (v2i64
                                         (xor VR128:$src1, VR128:$src2)))]>;
  }

  def ANDPSrm : PSI<0x54, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "andps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (and (bc_v2i64 (v4f32 VR128:$src1)),
                                           (memopv2i64 addr:$src2)))]>;
  def ORPSrm  : PSI<0x56, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "orps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (or (bc_v2i64 (v4f32 VR128:$src1)),
                                          (memopv2i64 addr:$src2)))]>;
  def XORPSrm : PSI<0x57, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "xorps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (xor (bc_v2i64 (v4f32 VR128:$src1)),
                                           (memopv2i64 addr:$src2)))]>;
  def ANDNPSrr : PSI<0x55, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v2i64 (and (xor VR128:$src1,
                                        (bc_v2i64 (v4i32 immAllOnesV))),
                                   VR128:$src2)))]>;
  def ANDNPSrm : PSI<0x55, MRMSrcMem,
                     (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
                                        (bc_v2i64 (v4i32 immAllOnesV))),
                                   (memopv2i64 addr:$src2))))]>;
}

let isTwoAddress = 1 in {
  def CMPPSrri : PSIi8<0xC2, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                       "cmp${cc}ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
                                          VR128:$src, imm:$cc))]>;
  def CMPPSrmi : PSIi8<0xC2, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
                       "cmp${cc}ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
                                          (load addr:$src), imm:$cc))]>;
}

// Shuffle and unpack instructions
let isTwoAddress = 1 in {
  let isConvertibleToThreeAddress = 1 in // Convert to pshufd
  def SHUFPSrri : PSIi8<0xC6, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1,
                         VR128:$src2, i32i8imm:$src3),
                        "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst,
                          (v4f32 (vector_shuffle
                                  VR128:$src1, VR128:$src2,
                                  SHUFP_shuffle_mask:$src3)))]>;
  def SHUFPSrmi : PSIi8<0xC6, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1,
                         f128mem:$src2, i32i8imm:$src3),
                        "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst,
                          (v4f32 (vector_shuffle
                                  VR128:$src1, (memopv4f32 addr:$src2),
                                  SHUFP_shuffle_mask:$src3)))]>;

  let AddedComplexity = 10 in {
    def UNPCKHPSrr : PSI<0x15, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "unpckhps\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v4f32 (vector_shuffle
                                   VR128:$src1, VR128:$src2,
                                   UNPCKH_shuffle_mask)))]>;
    def UNPCKHPSrm : PSI<0x15, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                         "unpckhps\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v4f32 (vector_shuffle
                                   VR128:$src1, (memopv4f32 addr:$src2),
                                   UNPCKH_shuffle_mask)))]>;
    def UNPCKLPSrr : PSI<0x14, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "unpcklps\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v4f32 (vector_shuffle
                                   VR128:$src1, VR128:$src2,
                                   UNPCKL_shuffle_mask)))]>;
    def UNPCKLPSrm : PSI<0x14, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                         "unpcklps\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v4f32 (vector_shuffle
                                   VR128:$src1, (memopv4f32 addr:$src2),
                                   UNPCKL_shuffle_mask)))]>;
  } // AddedComplexity
} // isTwoAddress

// Mask creation
def MOVMSKPSrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                     "movmskps\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (int_x86_sse_movmsk_ps VR128:$src))]>;
def MOVMSKPDrr : PDI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                     "movmskpd\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (int_x86_sse2_movmsk_pd VR128:$src))]>;

// Prefetching loads.
// TODO: no intrinsics for these?
def PREFETCHT0  : PSI<0x18, MRM1m, (outs), (ins i8mem:$src), "prefetcht0\t$src", []>;
def PREFETCHT1  : PSI<0x18, MRM2m, (outs), (ins i8mem:$src), "prefetcht1\t$src", []>;
def PREFETCHT2  : PSI<0x18, MRM3m, (outs), (ins i8mem:$src), "prefetcht2\t$src", []>;
def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src), "prefetchnta\t$src", []>;

// Non-temporal stores
def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                    "movntps\t{$src, $dst|$dst, $src}",
                    [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;

// Load, store, and memory fence
def SFENCE : PSI<0xAE, MRM7m, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>;

def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
                  "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
                  "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;

// Alias instructions that map zero vector to pxor / xorp* for sse.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
let isReMaterializable = 1 in
def V_SET0 : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins),
                 "xorps\t$dst, $dst",
                 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
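
// xorps of a register with itself produces all-zero bits regardless of the
// register's prior contents, which is what makes V_SET0 safe to mark
// isReMaterializable: it can be re-executed anywhere without reloading
// anything.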

// FR32 to 128-bit vector conversion.
def MOVSS2PSrr : SSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR32:$src),
                     "movss\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v4f32 (scalar_to_vector FR32:$src)))]>;
def MOVSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
                     "movss\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v4f32 (scalar_to_vector (loadf32 addr:$src))))]>;

// FIXME: may not be able to eliminate this movss with coalescing since the
// src and dest register classes are different. We really want to write this
// pattern like this:
// def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
//           (f32 FR32:$src)>;
def MOVPS2SSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins VR128:$src),
                     "movss\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (vector_extract (v4f32 VR128:$src),
                                       (iPTR 0)))]>;
def MOVPS2SSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
                     "movss\t{$src, $dst|$dst, $src}",
                     [(store (f32 (vector_extract (v4f32 VR128:$src),
                                   (iPTR 0))), addr:$dst)]>;

// Move to lower bits of a VR128, leaving upper bits alone.
// Three operand (but two address) aliases.
let isTwoAddress = 1 in {
  def MOVLSS2PSrr : SSI<0x10, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, FR32:$src2),
                        "movss\t{$src2, $dst|$dst, $src2}", []>;

  let AddedComplexity = 15 in
  def MOVLPSrr : SSI<0x10, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "movss\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                               MOVL_shuffle_mask)))]>;
}

// Move to lower bits of a VR128 and zeroing upper bits.
// Loading from memory automatically zeroes the upper bits.
let AddedComplexity = 20 in
def MOVZSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
                      "movss\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle immAllZerosV,
                                         (v4f32 (scalar_to_vector (loadf32 addr:$src))),
                                         MOVL_shuffle_mask)))]>;

//===----------------------------------------------------------------------===//
// SSE2 Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
def MOVSDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}", []>;
let isLoad = 1, isReMaterializable = 1 in
def MOVSDrm : SDI<0x10, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(set FR64:$dst, (loadf64 addr:$src))]>;
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>;

// Conversion instructions
def CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src),
                      "cvttsd2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint FR64:$src))]>;
def CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f64mem:$src),
                      "cvttsd2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
def CVTSD2SSrr  : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround FR64:$src))]>;
def CVTSD2SSrm  : SDI<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround (loadf64 addr:$src)))]>;
def CVTSI2SDrr  : SDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR32:$src),
                      "cvtsi2sd\t{$src, $dst|$dst, $src}",
                      [(set FR64:$dst, (sint_to_fp GR32:$src))]>;
def CVTSI2SDrm  : SDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i32mem:$src),
                      "cvtsi2sd\t{$src, $dst|$dst, $src}",
                      [(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>;

// SSE2 instructions with XS prefix
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (fextend FR32:$src))]>, XS,
                 Requires<[HasSSE2]>;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
                 Requires<[HasSSE2]>;

// Match intrinsics which expect XMM operand(s).
def Int_CVTSD2SIrr : SDI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                         "cvtsd2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse2_cvtsd2si VR128:$src))]>;
def Int_CVTSD2SIrm : SDI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
                         "cvtsd2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse2_cvtsd2si
                                           (load addr:$src)))]>;

// Aliases for intrinsics
def Int_CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                          "cvttsd2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse2_cvttsd2si VR128:$src))]>;
def Int_CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
                          "cvttsd2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst, (int_x86_sse2_cvttsd2si
                                            (load addr:$src)))]>;

// Comparison instructions
let isTwoAddress = 1 in {
  def CMPSDrr : SDI<0xC2, MRMSrcReg,
                    (outs FR64:$dst), (ins FR64:$src1, FR64:$src, SSECC:$cc),
                    "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
  def CMPSDrm : SDI<0xC2, MRMSrcMem,
                    (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, SSECC:$cc),
                    "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
}

let Defs = [EFLAGS] in {
def UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2),
                   "ucomisd\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR64:$src1, FR64:$src2), (implicit EFLAGS)]>;
def UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2),
                   "ucomisd\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR64:$src1, (loadf64 addr:$src2)),
                    (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Aliases to match intrinsics which expect XMM operand(s).
let isTwoAddress = 1 in {
  def Int_CMPSDrr : SDI<0xC2, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                        "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
                                           VR128:$src, imm:$cc))]>;
  def Int_CMPSDrm : SDI<0xC2, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, f64mem:$src, SSECC:$cc),
                        "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
                                           (load addr:$src), imm:$cc))]>;
}

let Defs = [EFLAGS] in {
def Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                       "ucomisd\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
                        (implicit EFLAGS)]>;
def Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                       "ucomisd\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v2f64 VR128:$src1), (load addr:$src2)),
                        (implicit EFLAGS)]>;

def Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                      "comisd\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
                       (implicit EFLAGS)]>;
def Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                      "comisd\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v2f64 VR128:$src1), (load addr:$src2)),
                       (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Aliases of packed SSE2 instructions for scalar use. These all have names that
// start with 'Fs'.

// Alias instructions that map fld0 to pxor for sse.
let isReMaterializable = 1 in
def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins),
                 "pxor\t$dst, $dst", [(set FR64:$dst, fpimm0)]>,
               Requires<[HasSSE2]>, TB, OpSize;

// Alias instruction to do FR64 reg-to-reg copy using movapd. Upper bits are
// disregarded.
def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                     "movapd\t{$src, $dst|$dst, $src}", []>;

// Alias instruction to load FR64 from f128mem using movapd. Upper bits are
// disregarded.
def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;

// Alias bitwise logical operations using SSE logical ops on packed FP values.
let isTwoAddress = 1 in {
let isCommutable = 1 in {
  def FsANDPDrr : PDI<0x54, MRMSrcReg, (outs FR64:$dst),
                      (ins FR64:$src1, FR64:$src2),
                      "andpd\t{$src2, $dst|$dst, $src2}",
                      [(set FR64:$dst, (X86fand FR64:$src1, FR64:$src2))]>;
  def FsORPDrr  : PDI<0x56, MRMSrcReg, (outs FR64:$dst),
                      (ins FR64:$src1, FR64:$src2),
                      "orpd\t{$src2, $dst|$dst, $src2}",
                      [(set FR64:$dst, (X86for FR64:$src1, FR64:$src2))]>;
  def FsXORPDrr : PDI<0x57, MRMSrcReg, (outs FR64:$dst),
                      (ins FR64:$src1, FR64:$src2),
                      "xorpd\t{$src2, $dst|$dst, $src2}",
                      [(set FR64:$dst, (X86fxor FR64:$src1, FR64:$src2))]>;
}

def FsANDPDrm : PDI<0x54, MRMSrcMem, (outs FR64:$dst),
                    (ins FR64:$src1, f128mem:$src2),
                    "andpd\t{$src2, $dst|$dst, $src2}",
                    [(set FR64:$dst, (X86fand FR64:$src1,
                                      (memopfsf64 addr:$src2)))]>;
def FsORPDrm  : PDI<0x56, MRMSrcMem, (outs FR64:$dst),
                    (ins FR64:$src1, f128mem:$src2),
                    "orpd\t{$src2, $dst|$dst, $src2}",
                    [(set FR64:$dst, (X86for FR64:$src1,
                                      (memopfsf64 addr:$src2)))]>;
def FsXORPDrm : PDI<0x57, MRMSrcMem, (outs FR64:$dst),
                    (ins FR64:$src1, f128mem:$src2),
                    "xorpd\t{$src2, $dst|$dst, $src2}",
                    [(set FR64:$dst, (X86fxor FR64:$src1,
                                      (memopfsf64 addr:$src2)))]>;

def FsANDNPDrr : PDI<0x55, MRMSrcReg,
                     (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                     "andnpd\t{$src2, $dst|$dst, $src2}", []>;
def FsANDNPDrm : PDI<0x55, MRMSrcMem,
                     (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2),
                     "andnpd\t{$src2, $dst|$dst, $src2}", []>;
}

/// basic_sse2_fp_binop_rm - SSE2 binops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation.  This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements undefined.
///
/// These three forms can each be reg+reg or reg+mem, so there are a total of
/// six "instructions".
///
let isTwoAddress = 1 in {
multiclass basic_sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
                                  SDNode OpNode, Intrinsic F64Int,
                                  bit Commutable = 0> {
  // Scalar operation, reg+reg.
  def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2),
                 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
                     !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F64Int VR128:$src1,
                                        sse_load_f64:$src2))]>;
}
}

// Arithmetic instructions
defm ADD : basic_sse2_fp_binop_rm<0x58, "add", fadd, int_x86_sse2_add_sd, 1>;
defm MUL : basic_sse2_fp_binop_rm<0x59, "mul", fmul, int_x86_sse2_mul_sd, 1>;
defm SUB : basic_sse2_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse2_sub_sd>;
defm DIV : basic_sse2_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse2_div_sd>;

/// sse2_fp_binop_rm - Other SSE2 binops
///
/// This multiclass is like basic_sse2_fp_binop_rm, with the addition of
/// instructions for a full-vector intrinsic form.  Operations that map
/// onto C operators don't use this form since they just use the plain
/// vector form instead of having a separate vector intrinsic form.
///
/// This provides a total of eight "instructions".
///
let isTwoAddress = 1 in {
multiclass sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
                            SDNode OpNode,
                            Intrinsic F64Int,
                            Intrinsic V2F64Int,
                            bit Commutable = 0> {
  // Scalar operation, reg+reg.
  def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2),
                 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
                     !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F64Int VR128:$src1,
                                        sse_load_f64:$src2))]>;

  // Vector intrinsic operation, reg+reg.
  def PDrr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V2F64Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, reg+mem.
  def PDrm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V2F64Int VR128:$src1, (load addr:$src2)))]>;
}
}

defm MAX : sse2_fp_binop_rm<0x5F, "max", X86fmax,
                            int_x86_sse2_max_sd, int_x86_sse2_max_pd>;
defm MIN : sse2_fp_binop_rm<0x5D, "min", X86fmin,
                            int_x86_sse2_min_sd, int_x86_sse2_min_pd>;

//===----------------------------------------------------------------------===//
// SSE packed FP Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
def MOVAPDrr : PDI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}", []>;
let isLoad = 1, isReMaterializable = 1 in
def MOVAPDrm : PDI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (alignedloadv2f64 addr:$src))]>;

def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;

def MOVUPDrr : PDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}", []>;
def MOVUPDrm : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (loadv2f64 addr:$src))]>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>;

// Intrinsic forms of MOVUPD load and store
def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;

let isTwoAddress = 1 in {
  let AddedComplexity = 20 in {
    def MOVLPDrm : PDI<0x12, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                       "movlpd\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v2f64 (vector_shuffle VR128:$src1,
                                 (scalar_to_vector (loadf64 addr:$src2)),
                                 MOVLP_shuffle_mask)))]>;
    def MOVHPDrm : PDI<0x16, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                       "movhpd\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v2f64 (vector_shuffle VR128:$src1,
                                 (scalar_to_vector (loadf64 addr:$src2)),
                                 MOVHP_shuffle_mask)))]>;
  } // AddedComplexity
} // isTwoAddress

def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>;

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (vector_shuffle VR128:$src, (undef),
                                         UNPCKH_shuffle_mask)), (iPTR 0))),
                           addr:$dst)]>;

// SSE2 instructions without OpSize prefix
def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                          (bitconvert (memopv2i64 addr:$src))))]>,
                     TB, Requires<[HasSSE2]>;

// SSE2 instructions with XS prefix
def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                     XS, Requires<[HasSSE2]>;
def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                          (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, Requires<[HasSSE2]>;

def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                            (load addr:$src)))]>;

// SSE2 packed instructions with XS prefix
def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))]>,
                      XS, Requires<[HasSSE2]>;
def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttps2dq
                                           (load addr:$src)))]>,
                      XS, Requires<[HasSSE2]>;

// SSE2 packed instructions with XD prefix
def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                     XD, Requires<[HasSSE2]>;
def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (load addr:$src)))]>,
                     XD, Requires<[HasSSE2]>;

def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                             (load addr:$src)))]>;

// SSE2 instructions without OpSize prefix
def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                     TB, Requires<[HasSSE2]>;

def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                            (load addr:$src)))]>;

// Match intrinsics which expect XMM operand(s).
// Aliases for intrinsics
let isTwoAddress = 1 in {
def Int_CVTSI2SDrr: SDI<0x2A, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
                        "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
                                           GR32:$src2))]>;
def Int_CVTSI2SDrm: SDI<0x2A, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
                        "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
                                           (loadi32 addr:$src2)))]>;
def Int_CVTSD2SSrr: SDI<0x5A, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
                                           VR128:$src2))]>;
def Int_CVTSD2SSrm: SDI<0x5A, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                        "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
                                           (load addr:$src2)))]>;
def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                         VR128:$src2))]>, XS,
                    Requires<[HasSSE2]>;
def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                      "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                         (load addr:$src2)))]>, XS,
                    Requires<[HasSSE2]>;
}

/// sse2_fp_unop_rm - SSE2 unops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.
///
/// These four forms can each have a reg or a mem operand, so there are a
/// total of eight "instructions".
///
multiclass sse2_fp_unop_rm<bits<8> opc, string OpcodeStr,
                           SDNode OpNode, Intrinsic F64Int,
                           Intrinsic V2F64Int,
                           bit Commutable = 0> {
  // Scalar operation, reg.
  def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                [(set FR64:$dst, (OpNode FR64:$src))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, mem.
  def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
                !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                [(set FR64:$dst, (OpNode (load addr:$src)))]>;

  // Vector operation, reg.
  def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, mem.
  def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;

  // Intrinsic operation, reg.
  def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F64Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, mem.
  def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
                    !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;

  // Vector intrinsic operation, reg
  def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V2F64Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, mem
  def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V2F64Int (load addr:$src)))]>;
}

// Square root.
defm SQRT : sse2_fp_unop_rm<0x51, "sqrt", fsqrt,
                            int_x86_sse2_sqrt_sd, int_x86_sse2_sqrt_pd>;
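
// For illustration, the 'defm SQRT' above expands to eight records named by
// prefix+suffix: SQRTSDr/SQRTSDm (scalar), SQRTPDr/SQRTPDm (packed), and the
// intrinsic forms SQRTSDr_Int/SQRTSDm_Int and SQRTPDr_Int/SQRTPDm_Int.
// Substituting the parameters, the first is equivalent to:
//
//   def SQRTSDr : SDI<0x51, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
//                     "sqrtsd\t{$src, $dst|$dst, $src}",
//                     [(set FR64:$dst, (fsqrt FR64:$src))]>;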

// There is no f64 version of the reciprocal approximation instructions.

let isTwoAddress = 1 in {
let isCommutable = 1 in {
def ANDPDrr : PDI<0x54, MRMSrcReg,
                  (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                  "andpd\t{$src2, $dst|$dst, $src2}",
                  [(set VR128:$dst,
                    (and (bc_v2i64 (v2f64 VR128:$src1)),
                         (bc_v2i64 (v2f64 VR128:$src2))))]>;
def ORPDrr  : PDI<0x56, MRMSrcReg,
                  (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                  "orpd\t{$src2, $dst|$dst, $src2}",
                  [(set VR128:$dst,
                    (or (bc_v2i64 (v2f64 VR128:$src1)),
                        (bc_v2i64 (v2f64 VR128:$src2))))]>;
def XORPDrr : PDI<0x57, MRMSrcReg,
                  (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                  "xorpd\t{$src2, $dst|$dst, $src2}",
                  [(set VR128:$dst,
                    (xor (bc_v2i64 (v2f64 VR128:$src1)),
                         (bc_v2i64 (v2f64 VR128:$src2))))]>;
}

def ANDPDrm : PDI<0x54, MRMSrcMem,
                  (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                  "andpd\t{$src2, $dst|$dst, $src2}",
                  [(set VR128:$dst,
                    (and (bc_v2i64 (v2f64 VR128:$src1)),
                         (memopv2i64 addr:$src2)))]>;
def ORPDrm  : PDI<0x56, MRMSrcMem,
                  (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                  "orpd\t{$src2, $dst|$dst, $src2}",
                  [(set VR128:$dst,
                    (or (bc_v2i64 (v2f64 VR128:$src1)),
                        (memopv2i64 addr:$src2)))]>;
def XORPDrm : PDI<0x57, MRMSrcMem,
                  (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                  "xorpd\t{$src2, $dst|$dst, $src2}",
                  [(set VR128:$dst,
                    (xor (bc_v2i64 (v2f64 VR128:$src1)),
                         (memopv2i64 addr:$src2)))]>;
def ANDNPDrr : PDI<0x55, MRMSrcReg,
                   (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                   "andnpd\t{$src2, $dst|$dst, $src2}",
                   [(set VR128:$dst,
                     (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
                          (bc_v2i64 (v2f64 VR128:$src2))))]>;
def ANDNPDrm : PDI<0x55, MRMSrcMem,
                   (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                   "andnpd\t{$src2, $dst|$dst, $src2}",
                   [(set VR128:$dst,
                     (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
                          (memopv2i64 addr:$src2)))]>;
}
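
// The patterns above bitconvert both v2f64 operands to v2i64 before the
// logical operation: the DAG models and/or/xor on floating-point vectors as
// integer ops on bitcasts, so matching the v2i64 form lets a single pattern
// per instruction cover every source type that gets bitcast into it.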

let isTwoAddress = 1 in {
def CMPPDrri : PDIi8<0xC2, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                     "cmp${cc}pd\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
                                        VR128:$src, imm:$cc))]>;
def CMPPDrmi : PDIi8<0xC2, MRMSrcMem,
                     (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
                     "cmp${cc}pd\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
                                        (load addr:$src), imm:$cc))]>;
}

// Shuffle and unpack instructions
let isTwoAddress = 1 in {
def SHUFPDrri : PDIi8<0xC6, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i8imm:$src3),
                      "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set VR128:$dst, (v2f64 (vector_shuffle
                                                VR128:$src1, VR128:$src2,
                                                SHUFP_shuffle_mask:$src3)))]>;
def SHUFPDrmi : PDIi8<0xC6, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1,
                                          f128mem:$src2, i8imm:$src3),
                      "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set VR128:$dst,
                        (v2f64 (vector_shuffle
                                VR128:$src1, (memopv2f64 addr:$src2),
                                SHUFP_shuffle_mask:$src3)))]>;

let AddedComplexity = 10 in {
def UNPCKHPDrr : PDI<0x15, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "unpckhpd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v2f64 (vector_shuffle
                               VR128:$src1, VR128:$src2,
                               UNPCKH_shuffle_mask)))]>;
def UNPCKHPDrm : PDI<0x15, MRMSrcMem,
                     (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     "unpckhpd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v2f64 (vector_shuffle
                               VR128:$src1, (memopv2f64 addr:$src2),
                               UNPCKH_shuffle_mask)))]>;
def UNPCKLPDrr : PDI<0x14, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "unpcklpd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v2f64 (vector_shuffle
                               VR128:$src1, VR128:$src2,
                               UNPCKL_shuffle_mask)))]>;
def UNPCKLPDrm : PDI<0x14, MRMSrcMem,
                     (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     "unpcklpd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v2f64 (vector_shuffle
                               VR128:$src1, (memopv2f64 addr:$src2),
                               UNPCKL_shuffle_mask)))]>;
} // AddedComplexity
} // isTwoAddress

//===----------------------------------------------------------------------===//
// SSE integer instructions
//===----------------------------------------------------------------------===//

// Move Instructions
def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}", []>;
def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;

def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                 "movdqu\t{$src, $dst|$dst, $src}",
                 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
               XS, Requires<[HasSSE2]>;
def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                 "movdqu\t{$src, $dst|$dst, $src}",
                 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
               XS, Requires<[HasSSE2]>;

// Intrinsic forms of MOVDQU load and store
def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                     "movdqu\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
                   XS, Requires<[HasSSE2]>;
def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                     "movdqu\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
                   XS, Requires<[HasSSE2]>;

let isTwoAddress = 1 in {

multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
                            bit Commutable = 0> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1,
                                  (bitconvert (memopv2i64 addr:$src2))))]>;
}

multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
                             string OpcodeStr, Intrinsic IntId> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1,
                                  (bitconvert (memopv2i64 addr:$src2))))]>;
  def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (IntId VR128:$src1,
                                    (scalar_to_vector (i32 imm:$src2))))]>;
}
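
// PDI_binop_rmi_int gives each shift three forms: rr (count in an XMM
// register), rm (count loaded from memory), and ri (immediate count, encoded
// with the second opcode and the ModRM /reg field passed as ImmForm -- e.g.
// MRM6r selects /6 for psllw below).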

/// PDI_binop_rm - Simple SSE2 binary operator.
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                        ValueType OpVT, bit Commutable = 0> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
                                        (bitconvert (memopv2i64 addr:$src2)))))]>;
}
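
// For example, 'defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;'
// below produces PADDBrr and PADDBrm; the rm form folds a 128-bit load,
// bitcast from the memory type v2i64 to the element type v16i8:
//
//   def PADDBrm : PDI<0xFC, MRMSrcMem, (outs VR128:$dst),
//                     (ins VR128:$src1, i128mem:$src2),
//                     "paddb\t{$src2, $dst|$dst, $src2}",
//                     [(set VR128:$dst, (v16i8 (add VR128:$src1,
//                                        (bitconvert (memopv2i64 addr:$src2)))))]>;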

/// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
///
/// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
/// to collapse (bitconvert VT to VT) into its operand.
///
multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
                              bit Commutable = 0> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
}

} // isTwoAddress

// 128-bit Integer Arithmetic

defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;

defm PADDSB  : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
defm PADDSW  : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;

defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;

defm PSUBSB  : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
defm PSUBSW  : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;

defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;

defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
defm PMULHW  : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w , 1>;
defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;

defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;

defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;

defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;

defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw", int_x86_sse2_psll_w>;
defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld", int_x86_sse2_psll_d>;
defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq", int_x86_sse2_psll_q>;

defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw", int_x86_sse2_psrl_w>;
defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld", int_x86_sse2_psrl_d>;
defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq", int_x86_sse2_psrl_q>;

defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw", int_x86_sse2_psra_w>;
defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad", int_x86_sse2_psra_d>;
// PSRAQ doesn't exist in SSE[1-3].

// 128-bit logical shifts.
let isTwoAddress = 1 in {
def PSLLDQri : PDIi8<0x73, MRM7r,
                     (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                     "pslldq\t{$src2, $dst|$dst, $src2}", []>;
def PSRLDQri : PDIi8<0x73, MRM3r,
                     (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                     "psrldq\t{$src2, $dst|$dst, $src2}", []>;
// PSRADQri doesn't exist in SSE[1-3].
}

let Predicates = [HasSSE2] in {
  def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
            (v2i64 (PSLLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
            (v2i64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
  def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
            (v2f64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
}
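
// The shift amounts in the patterns above are bit counts, while pslldq and
// psrldq shift whole bytes; PSxLDQ_imm is the SDNodeXForm (defined alongside
// the other transform functions) that converts the immediate from a bit
// count to the byte count the instructions expect.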

defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
defm POR  : PDI_binop_rm_v2i64<0xEB, "por" , or , 1>;
defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;

let isTwoAddress = 1 in {
def PANDNrr : PDI<0xDF, MRMSrcReg,
                  (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                  "pandn\t{$src2, $dst|$dst, $src2}",
                  [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
                                            VR128:$src2)))]>;
def PANDNrm : PDI<0xDF, MRMSrcMem,
                  (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                  "pandn\t{$src2, $dst|$dst, $src2}",
                  [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
                                            (memopv2i64 addr:$src2))))]>;
}

// SSE2 Integer comparison
defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b>;
defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w>;
defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d>;
defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;

// Pack instructions
defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;

// Shuffle and unpack instructions
def PSHUFDri : PDIi8<0x70, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
                     "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set VR128:$dst, (v4i32 (vector_shuffle
                                               VR128:$src1, (undef),
                                               PSHUFD_shuffle_mask:$src2)))]>;
def PSHUFDmi : PDIi8<0x70, MRMSrcMem,
                     (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
                     "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set VR128:$dst, (v4i32 (vector_shuffle
                                               (bc_v4i32 (memopv2i64 addr:$src1)),
                                               (undef),
                                               PSHUFD_shuffle_mask:$src2)))]>;

// SSE2 with ImmT == Imm8 and XS prefix.
def PSHUFHWri : Ii8<0x70, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
                    "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v8i16 (vector_shuffle
                                              VR128:$src1, (undef),
                                              PSHUFHW_shuffle_mask:$src2)))]>,
                XS, Requires<[HasSSE2]>;
def PSHUFHWmi : Ii8<0x70, MRMSrcMem,
                    (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
                    "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v8i16 (vector_shuffle
                                              (bc_v8i16 (memopv2i64 addr:$src1)),
                                              (undef),
                                              PSHUFHW_shuffle_mask:$src2)))]>,
                XS, Requires<[HasSSE2]>;

// SSE2 with ImmT == Imm8 and XD prefix.
def PSHUFLWri : Ii8<0x70, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v8i16 (vector_shuffle
                                              VR128:$src1, (undef),
                                              PSHUFLW_shuffle_mask:$src2)))]>,
                XD, Requires<[HasSSE2]>;
def PSHUFLWmi : Ii8<0x70, MRMSrcMem,
                    (outs VR128:$dst), (ins i128mem:$src1, i32i8imm:$src2),
                    "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v8i16 (vector_shuffle
                                              (bc_v8i16 (memopv2i64 addr:$src1)),
                                              (undef),
                                              PSHUFLW_shuffle_mask:$src2)))]>,
                XD, Requires<[HasSSE2]>;
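
// All three pshuf* forms share opcode 0x70; only the mandatory prefix tells
// them apart (66 0F 70 pshufd, F3 0F 70 pshufhw, F2 0F 70 pshuflw). pshufhw
// and pshuflw permute only the upper or lower four words respectively, which
// is why each gets its own shuffle-mask predicate.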

let isTwoAddress = 1 in {
def PUNPCKLBWrr : PDI<0x60, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "punpcklbw\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
                                UNPCKL_shuffle_mask)))]>;
def PUNPCKLBWrm : PDI<0x60, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                      "punpcklbw\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v16i8 (vector_shuffle VR128:$src1,
                                (bc_v16i8 (memopv2i64 addr:$src2)),
                                UNPCKL_shuffle_mask)))]>;
def PUNPCKLWDrr : PDI<0x61, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "punpcklwd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
                                UNPCKL_shuffle_mask)))]>;
def PUNPCKLWDrm : PDI<0x61, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                      "punpcklwd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v8i16 (vector_shuffle VR128:$src1,
                                (bc_v8i16 (memopv2i64 addr:$src2)),
                                UNPCKL_shuffle_mask)))]>;
def PUNPCKLDQrr : PDI<0x62, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "punpckldq\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                                UNPCKL_shuffle_mask)))]>;
def PUNPCKLDQrm : PDI<0x62, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                      "punpckldq\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4i32 (vector_shuffle VR128:$src1,
                                (bc_v4i32 (memopv2i64 addr:$src2)),
                                UNPCKL_shuffle_mask)))]>;
def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                       "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
                                 UNPCKL_shuffle_mask)))]>;
def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                       "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v2i64 (vector_shuffle VR128:$src1,
                                 (memopv2i64 addr:$src2),
                                 UNPCKL_shuffle_mask)))]>;

def PUNPCKHBWrr : PDI<0x68, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "punpckhbw\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
                                UNPCKH_shuffle_mask)))]>;
def PUNPCKHBWrm : PDI<0x68, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                      "punpckhbw\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v16i8 (vector_shuffle VR128:$src1,
                                (bc_v16i8 (memopv2i64 addr:$src2)),
                                UNPCKH_shuffle_mask)))]>;
def PUNPCKHWDrr : PDI<0x69, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "punpckhwd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
                                UNPCKH_shuffle_mask)))]>;
def PUNPCKHWDrm : PDI<0x69, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                      "punpckhwd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v8i16 (vector_shuffle VR128:$src1,
                                (bc_v8i16 (memopv2i64 addr:$src2)),
                                UNPCKH_shuffle_mask)))]>;
def PUNPCKHDQrr : PDI<0x6A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "punpckhdq\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                                UNPCKH_shuffle_mask)))]>;
def PUNPCKHDQrm : PDI<0x6A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                      "punpckhdq\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4i32 (vector_shuffle VR128:$src1,
                                (bc_v4i32 (memopv2i64 addr:$src2)),
                                UNPCKH_shuffle_mask)))]>;
def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                       "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
                                 UNPCKH_shuffle_mask)))]>;
def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                       "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v2i64 (vector_shuffle VR128:$src1,
                                 (memopv2i64 addr:$src2),
                                 UNPCKH_shuffle_mask)))]>;
}

// Extract / Insert
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
                     (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
                     "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
                                       (iPTR imm:$src2)))]>;
let isTwoAddress = 1 in {
def PINSRWrri : PDIi8<0xC4, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1,
                                          GR32:$src2, i32i8imm:$src3),
                      "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set VR128:$dst,
                        (v8i16 (X86pinsrw (v8i16 VR128:$src1),
                                GR32:$src2, (iPTR imm:$src3))))]>;
def PINSRWrmi : PDIi8<0xC4, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1,
                                          i16mem:$src2, i32i8imm:$src3),
                      "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set VR128:$dst,
                        (v8i16 (X86pinsrw (v8i16 VR128:$src1),
                                (i32 (anyext (loadi16 addr:$src2))),
                                (iPTR imm:$src3))))]>;
}
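
// PINSRWrmi loads only an i16 and any-extends it to i32: pinsrw ignores the
// upper half of a 32-bit source anyway, so the extended bits don't matter
// and no explicit zero- or sign-extension is needed.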

def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                     "pmovmskb\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;

// Conditional store
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
                     "maskmovdqu\t{$mask, $src|$src, $mask}",
                     [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;

// Non-temporal stores
def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                    "movntpd\t{$src, $dst|$dst, $src}",
                    [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                    "movntdq\t{$src, $dst|$dst, $src}",
                    [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
                 "movnti\t{$src, $dst|$dst, $src}",
                 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
               TB, Requires<[HasSSE2]>;

// Flush cache
def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
                "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
              TB, Requires<[HasSSE2]>;

// Load, store, and memory fence
def LFENCE : I<0xAE, MRM5m, (outs), (ins),
               "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
def MFENCE : I<0xAE, MRM6m, (outs), (ins),
               "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
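
// clflush, lfence and mfence all share opcode 0F AE and are distinguished by
// the ModRM reg field: the MRM5m/MRM6m/MRM7m formats emit /5, /6 and /7
// respectively, which is also why the fences take no explicit operands.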

// Alias instruction that maps an all-ones vector to pcmpeqd reg, reg, which
// sets every bit of the destination.
// FIXME: remove when we can teach regalloc that pcmpeqd reg, reg is ok.
let isReMaterializable = 1 in
def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins),
                       "pcmpeqd\t$dst, $dst",
                       [(set VR128:$dst, (v2f64 immAllOnesV))]>;

// FR64 to 128-bit vector conversion.
def MOVSD2PDrr : SDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR64:$src),
                     "movsd\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2f64 (scalar_to_vector FR64:$src)))]>;
def MOVSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                     "movsd\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2f64 (scalar_to_vector (loadf64 addr:$src))))]>;

def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))]>;
def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;

def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (bitconvert GR32:$src))]>;
def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;

// SSE2 instructions with XS prefix
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                    "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst,
                      (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                  Requires<[HasSSE2]>;
def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}",
                      [(store (i64 (vector_extract (v2i64 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>;

// FIXME: we may not be able to eliminate this movsd, since coalescing cannot
// apply when the src and dest register classes are different. We really want
// to write this pattern like this:
// def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
//           (f32 FR32:$src)>;
def MOVPD2SDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins VR128:$src),
                     "movsd\t{$src, $dst|$dst, $src}",
                     [(set FR64:$dst, (vector_extract (v2f64 VR128:$src),
                                       (iPTR 0)))]>;
def MOVPD2SDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movsd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract (v2f64 VR128:$src),
                                   (iPTR 0))), addr:$dst)]>;
def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                        (iPTR 0)))]>;
def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (vector_extract (v4i32 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>;

def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (bitconvert FR32:$src))]>;
def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;

// Move to lower bits of a VR128, leaving upper bits alone.
// Three operand (but two address) aliases.
let isTwoAddress = 1 in {
def MOVLSD2PDrr : SDI<0x10, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, FR64:$src2),
                      "movsd\t{$src2, $dst|$dst, $src2}", []>;

let AddedComplexity = 15 in
def MOVLPDrr : SDI<0x10, MRMSrcReg,
                   (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                   "movsd\t{$src2, $dst|$dst, $src2}",
                   [(set VR128:$dst,
                     (v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
                             MOVL_shuffle_mask)))]>;
}

// Store / copy lower 64-bits of a XMM register.
def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;

// Move to lower bits of a VR128, zeroing the upper bits.
// Loading from memory automatically zeroes the upper bits.
let AddedComplexity = 20 in
def MOVZSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                      "movsd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v2f64 (vector_shuffle immAllZerosV,
                                (v2f64 (scalar_to_vector
                                        (loadf64 addr:$src))),
                                MOVL_shuffle_mask)))]>;

// movd / movq to XMM register zero-extends
let AddedComplexity = 15 in
def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (vector_shuffle immAllZerosV,
                                 (v4i32 (scalar_to_vector GR32:$src)),
                                 MOVL_shuffle_mask)))]>;
let AddedComplexity = 20 in
def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (vector_shuffle immAllZerosV,
                                 (v4i32 (scalar_to_vector (loadi32 addr:$src))),
                                 MOVL_shuffle_mask)))]>;

// Moving from XMM to XMM, still clearing the upper 64 bits.
let AddedComplexity = 15 in
def MOVZQI2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse2_movl_dq VR128:$src))]>,
                   XS, Requires<[HasSSE2]>;
let AddedComplexity = 20 in
def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse2_movl_dq
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                   XS, Requires<[HasSSE2]>;
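
// The AddedComplexity values used above bias instruction selection: tblgen
// adds them to a pattern's computed complexity, so at 15/20 these
// zero-extending forms are tried before the plain moves that would
// otherwise also match.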

//===----------------------------------------------------------------------===//
// SSE3 Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
def MOVSHDUPrr : S3SI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movshdup\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                VR128:$src, (undef),
                                                MOVSHDUP_shuffle_mask)))]>;
def MOVSHDUPrm : S3SI<0x16, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "movshdup\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                (memopv4f32 addr:$src), (undef),
                                                MOVSHDUP_shuffle_mask)))]>;

def MOVSLDUPrr : S3SI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movsldup\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                VR128:$src, (undef),
                                                MOVSLDUP_shuffle_mask)))]>;
def MOVSLDUPrm : S3SI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "movsldup\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                (memopv4f32 addr:$src), (undef),
                                                MOVSLDUP_shuffle_mask)))]>;

def MOVDDUPrr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "movddup\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (v2f64 (vector_shuffle
                                               VR128:$src, (undef),
                                               SSE_splat_lo_mask)))]>;
def MOVDDUPrm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                     "movddup\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2f64 (vector_shuffle
                               (scalar_to_vector (loadf64 addr:$src)),
                               (undef),
                               SSE_splat_lo_mask)))]>;

// Arithmetic
let isTwoAddress = 1 in {
def ADDSUBPSrr : S3DI<0xD0, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "addsubps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
                                         VR128:$src2))]>;
def ADDSUBPSrm : S3DI<0xD0, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                      "addsubps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
                                         (load addr:$src2)))]>;
def ADDSUBPDrr : S3I<0xD0, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "addsubpd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
                                        VR128:$src2))]>;
def ADDSUBPDrm : S3I<0xD0, MRMSrcMem,
                     (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     "addsubpd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
                                        (load addr:$src2)))]>;
}

def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "lddqu\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;

// Horizontal ops
class S3D_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
  : S3DI<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         [(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
class S3D_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
  : S3DI<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         [(set VR128:$dst, (v4f32 (IntId VR128:$src1, (load addr:$src2))))]>;
class S3_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
  : S3I<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
        !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
        [(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
class S3_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
  : S3I<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
        !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
        [(set VR128:$dst, (v2f64 (IntId VR128:$src1, (load addr:$src2))))]>;

let isTwoAddress = 1 in {
def HADDPSrr : S3D_Intrr<0x7C, "haddps", int_x86_sse3_hadd_ps>;
def HADDPSrm : S3D_Intrm<0x7C, "haddps", int_x86_sse3_hadd_ps>;
def HADDPDrr : S3_Intrr <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
def HADDPDrm : S3_Intrm <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
def HSUBPSrr : S3D_Intrr<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
def HSUBPSrm : S3D_Intrm<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
def HSUBPDrr : S3_Intrr <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
def HSUBPDrm : S3_Intrm <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
}

// Thread synchronization
def MONITOR : I<0xC8, RawFrm, (outs), (ins), "monitor",
                [(int_x86_sse3_monitor EAX, ECX, EDX)]>, TB, Requires<[HasSSE3]>;
def MWAIT : I<0xC9, RawFrm, (outs), (ins), "mwait",
              [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;

// vector_shuffle v1, <undef> <1, 1, 3, 3>
let AddedComplexity = 15 in
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                  MOVSHDUP_shuffle_mask)),
          (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
let AddedComplexity = 20 in
def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
                  MOVSHDUP_shuffle_mask)),
          (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;

// vector_shuffle v1, <undef> <0, 0, 2, 2>
let AddedComplexity = 15 in
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                  MOVSLDUP_shuffle_mask)),
          (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
let AddedComplexity = 20 in
def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
                  MOVSLDUP_shuffle_mask)),
          (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;

//===----------------------------------------------------------------------===//
// SSSE3 Instructions
//===----------------------------------------------------------------------===//

// SSSE3 Instruction Templates:
//
//   SS38I - SSSE3 instructions with T8 prefix.
//   SS3AI - SSSE3 instructions with TA prefix.
//
// Note: SSSE3 instructions have 64-bit and 128-bit versions. The 64-bit
// version uses the MMX registers. We put those instructions here because they
// better fit into the SSSE3 instruction category rather than the MMX category.

class SS38I<bits<8> o, Format F, dag outs, dag ins, string asm,
            list<dag> pattern>
      : I<o, F, outs, ins, asm, pattern>, T8, Requires<[HasSSSE3]>;
class SS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
            list<dag> pattern>
      : I<o, F, outs, ins, asm, pattern>, TA, Requires<[HasSSSE3]>;
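
// T8 and TA select the 0F 38 and 0F 3A opcode-map escapes introduced with
// SSSE3; the 128-bit defs below additionally carry OpSize, i.e. the 66
// operand-size prefix, which is what separates the XMM encodings from the
// MMX ones sharing the same opcode bytes.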

/// SS3I_unop_rm_int_8 - Simple SSSE3 unary operator whose type is v*i8.
let isTwoAddress = 1 in {
multiclass SS3I_unop_rm_int_8<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId64, Intrinsic IntId128,
                              bit Commutable = 0> {
  def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst, (IntId64 VR64:$src))]> {
    let isCommutable = Commutable;
  }
  def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst,
                     (IntId64 (bitconvert (memopv8i8 addr:$src))))]>;

  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>,
              OpSize {
    let isCommutable = Commutable;
  }
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv16i8 addr:$src))))]>, OpSize;
}
}

/// SS3I_unop_rm_int_16 - Simple SSSE3 unary operator whose type is v*i16.
let isTwoAddress = 1 in {
multiclass SS3I_unop_rm_int_16<bits<8> opc, string OpcodeStr,
                               Intrinsic IntId64, Intrinsic IntId128,
                               bit Commutable = 0> {
  def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                   (ins VR64:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst, (IntId64 VR64:$src))]> {
    let isCommutable = Commutable;
  }
  def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                   (ins i64mem:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst,
                     (IntId64
                      (bitconvert (memopv4i16 addr:$src))))]>;

  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>,
              OpSize {
    let isCommutable = Commutable;
  }
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
}
}

/// SS3I_unop_rm_int_32 - Simple SSSE3 unary operator whose type is v*i32.
let isTwoAddress = 1 in {
multiclass SS3I_unop_rm_int_32<bits<8> opc, string OpcodeStr,
                               Intrinsic IntId64, Intrinsic IntId128,
                               bit Commutable = 0> {
  def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                   (ins VR64:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst, (IntId64 VR64:$src))]> {
    let isCommutable = Commutable;
  }
  def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                   (ins i64mem:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst,
                     (IntId64
                      (bitconvert (memopv2i32 addr:$src))))]>;

  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>,
              OpSize {
    let isCommutable = Commutable;
  }
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv4i32 addr:$src))))]>, OpSize;
}
}

defm PABSB : SS3I_unop_rm_int_8 <0x1C, "pabsb",
                                 int_x86_ssse3_pabs_b,
                                 int_x86_ssse3_pabs_b_128>;
defm PABSW : SS3I_unop_rm_int_16<0x1D, "pabsw",
                                 int_x86_ssse3_pabs_w,
                                 int_x86_ssse3_pabs_w_128>;
defm PABSD : SS3I_unop_rm_int_32<0x1E, "pabsd",
                                 int_x86_ssse3_pabs_d,
                                 int_x86_ssse3_pabs_d_128>;

/// SS3I_binop_rm_int_8 - Simple SSSE3 binary operator whose type is v*i8.
let isTwoAddress = 1 in {
multiclass SS3I_binop_rm_int_8<bits<8> opc, string OpcodeStr,
                               Intrinsic IntId64, Intrinsic IntId128,
                               bit Commutable = 0> {
  def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                   (ins VR64:$src1, VR64:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
    let isCommutable = Commutable;
  }
  def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                   (ins VR64:$src1, i64mem:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR64:$dst,
                     (IntId64 VR64:$src1,
                      (bitconvert (memopv8i8 addr:$src2))))]>;

  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
              OpSize {
    let isCommutable = Commutable;
  }
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2),
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst,
                      (IntId128 VR128:$src1,
                       (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
}

/// SS3I_binop_rm_int_16 - Simple SSSE3 binary operator whose type is v*i16.
let isTwoAddress = 1 in {
multiclass SS3I_binop_rm_int_16<bits<8> opc, string OpcodeStr,
                                Intrinsic IntId64, Intrinsic IntId128,
                                bit Commutable = 0> {
  def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                   (ins VR64:$src1, VR64:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
    let isCommutable = Commutable;
  }
  def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                   (ins VR64:$src1, i64mem:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR64:$dst,
                     (IntId64 VR64:$src1,
                      (bitconvert (memopv4i16 addr:$src2))))]>;

  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
              OpSize {
    let isCommutable = Commutable;
  }
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2),
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst,
                      (IntId128 VR128:$src1,
                       (bitconvert (memopv8i16 addr:$src2))))]>, OpSize;
}
}

/// SS3I_binop_rm_int_32 - Simple SSSE3 binary operator whose type is v*i32.
let isTwoAddress = 1 in {
multiclass SS3I_binop_rm_int_32<bits<8> opc, string OpcodeStr,
                                Intrinsic IntId64, Intrinsic IntId128,
                                bit Commutable = 0> {
  def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                   (ins VR64:$src1, VR64:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
    let isCommutable = Commutable;
  }
  def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                   (ins VR64:$src1, i64mem:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR64:$dst,
                     (IntId64 VR64:$src1,
                      (bitconvert (memopv2i32 addr:$src2))))]>;

  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
              OpSize {
    let isCommutable = Commutable;
  }
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2),
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst,
                      (IntId128 VR128:$src1,
                       (bitconvert (memopv4i32 addr:$src2))))]>, OpSize;
}
}

defm PHADDW    : SS3I_binop_rm_int_16<0x01, "phaddw",
                                      int_x86_ssse3_phadd_w,
                                      int_x86_ssse3_phadd_w_128, 1>;
defm PHADDD    : SS3I_binop_rm_int_32<0x02, "phaddd",
                                      int_x86_ssse3_phadd_d,
                                      int_x86_ssse3_phadd_d_128, 1>;
defm PHADDSW   : SS3I_binop_rm_int_16<0x03, "phaddsw",
                                      int_x86_ssse3_phadd_sw,
                                      int_x86_ssse3_phadd_sw_128, 1>;
defm PHSUBW    : SS3I_binop_rm_int_16<0x05, "phsubw",
                                      int_x86_ssse3_phsub_w,
                                      int_x86_ssse3_phsub_w_128>;
defm PHSUBD    : SS3I_binop_rm_int_32<0x06, "phsubd",
                                      int_x86_ssse3_phsub_d,
                                      int_x86_ssse3_phsub_d_128>;
defm PHSUBSW   : SS3I_binop_rm_int_16<0x07, "phsubsw",
                                      int_x86_ssse3_phsub_sw,
                                      int_x86_ssse3_phsub_sw_128>;
defm PMADDUBSW : SS3I_binop_rm_int_8 <0x04, "pmaddubsw",
                                      int_x86_ssse3_pmadd_ub_sw,
                                      int_x86_ssse3_pmadd_ub_sw_128, 1>;
defm PMULHRSW  : SS3I_binop_rm_int_16<0x0B, "pmulhrsw",
                                      int_x86_ssse3_pmul_hr_sw,
                                      int_x86_ssse3_pmul_hr_sw_128, 1>;
defm PSHUFB    : SS3I_binop_rm_int_8 <0x00, "pshufb",
                                      int_x86_ssse3_pshuf_b,
                                      int_x86_ssse3_pshuf_b_128>;
defm PSIGNB    : SS3I_binop_rm_int_8 <0x08, "psignb",
                                      int_x86_ssse3_psign_b,
                                      int_x86_ssse3_psign_b_128>;
defm PSIGNW    : SS3I_binop_rm_int_16<0x09, "psignw",
                                      int_x86_ssse3_psign_w,
                                      int_x86_ssse3_psign_w_128>;
defm PSIGND    : SS3I_binop_rm_int_32<0x0A, "psignd",
                                      int_x86_ssse3_psign_d,
                                      int_x86_ssse3_psign_d_128>;

let isTwoAddress = 1 in {
def PALIGNR64rr  : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
                         (ins VR64:$src1, VR64:$src2, i16imm:$src3),
                         "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                         [(set VR64:$dst,
                           (int_x86_ssse3_palign_r
                            VR64:$src1, VR64:$src2,
                            imm:$src3))]>;
def PALIGNR64rm  : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
                         (ins VR64:$src1, i64mem:$src2, i16imm:$src3),
                         "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                         [(set VR64:$dst,
                           (int_x86_ssse3_palign_r
                            VR64:$src1,
                            (bitconvert (memopv2i32 addr:$src2)),
                            imm:$src3))]>;

def PALIGNR128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
                         (ins VR128:$src1, VR128:$src2, i32imm:$src3),
                         "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                         [(set VR128:$dst,
                           (int_x86_ssse3_palign_r_128
                            VR128:$src1, VR128:$src2,
                            imm:$src3))]>, OpSize;
def PALIGNR128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
                         (ins VR128:$src1, i128mem:$src2, i32imm:$src3),
                         "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                         [(set VR128:$dst,
                           (int_x86_ssse3_palign_r_128
                            VR128:$src1,
                            (bitconvert (memopv4i32 addr:$src2)),
                            imm:$src3))]>, OpSize;
}

//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// 128-bit vector undefs.
def : Pat<(v4f32 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
def : Pat<(v2f64 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
def : Pat<(v16i8 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
def : Pat<(v8i16 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;

// 128-bit vector all zeros.
def : Pat<(v16i8 immAllZerosV), (V_SET0)>, Requires<[HasSSE2]>;
def : Pat<(v8i16 immAllZerosV), (V_SET0)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 immAllZerosV), (V_SET0)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 immAllZerosV), (V_SET0)>, Requires<[HasSSE2]>;
def : Pat<(v2f64 immAllZerosV), (V_SET0)>, Requires<[HasSSE2]>;

// 128-bit vector all ones.
def : Pat<(v16i8 immAllOnesV), (V_SETALLONES)>, Requires<[HasSSE2]>;
def : Pat<(v8i16 immAllOnesV), (V_SETALLONES)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 immAllOnesV), (V_SETALLONES)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 immAllOnesV), (V_SETALLONES)>, Requires<[HasSSE2]>;
def : Pat<(v4f32 immAllOnesV), (V_SETALLONES)>, Requires<[HasSSE1]>;

// Scalar to v8i16 / v16i8. The source may be a GR32, but only the lower 8 or
// 16 bits of it are used.
def : Pat<(v8i16 (X86s2vec GR32:$src)), (MOVDI2PDIrr GR32:$src)>,
      Requires<[HasSSE2]>;
def : Pat<(v16i8 (X86s2vec GR32:$src)), (MOVDI2PDIrr GR32:$src)>,
      Requires<[HasSSE2]>;

// bit_convert
let Predicates = [HasSSE2] in {
  def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}
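
// Since every 128-bit vector type lives in VR128, a bitconvert between any
// two of them needs no instruction at all: the patterns above simply reuse
// the source register under the destination type.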

// Move scalar to XMM zero-extended
// movd to XMM register zero-extends
let AddedComplexity = 15 in {
def : Pat<(v8i16 (vector_shuffle immAllZerosV,
                  (v8i16 (X86s2vec GR32:$src)), MOVL_shuffle_mask)),
          (MOVZDI2PDIrr GR32:$src)>, Requires<[HasSSE2]>;
def : Pat<(v16i8 (vector_shuffle immAllZerosV,
                  (v16i8 (X86s2vec GR32:$src)), MOVL_shuffle_mask)),
          (MOVZDI2PDIrr GR32:$src)>, Requires<[HasSSE2]>;
// Zeroing a VR128 then do a MOVS{S|D} to the lower bits.
def : Pat<(v2f64 (vector_shuffle immAllZerosV,
                  (v2f64 (scalar_to_vector FR64:$src)), MOVL_shuffle_mask)),
          (MOVLSD2PDrr (V_SET0), FR64:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4f32 (vector_shuffle immAllZerosV,
                  (v4f32 (scalar_to_vector FR32:$src)), MOVL_shuffle_mask)),
          (MOVLSS2PSrr (V_SET0), FR32:$src)>, Requires<[HasSSE2]>;
}

// Splat v2f64 / v2i64
let AddedComplexity = 10 in {
def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
          (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
          (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
          (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
          (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}

// Splat v4f32
def : Pat<(vector_shuffle (v4f32 VR128:$src), (undef), SSE_splat_mask:$sm),
          (SHUFPSrri VR128:$src, VR128:$src, SSE_splat_mask:$sm)>,
      Requires<[HasSSE1]>;

// Special unary SHUFPSrri case.
// FIXME: when we want non-two-address code, we should use PSHUFD instead.
def : Pat<(vector_shuffle (v4f32 VR128:$src1), (undef),
           SHUFP_unary_shuffle_mask:$sm),
          (SHUFPSrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE1]>;
// Special unary SHUFPDrri case.
def : Pat<(vector_shuffle (v2f64 VR128:$src1), (undef),
           SHUFP_unary_shuffle_mask:$sm),
          (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Unary v4f32 shuffle with PSHUF* in order to fold a load.
def : Pat<(vector_shuffle (memopv4f32 addr:$src1), (undef),
           SHUFP_unary_shuffle_mask:$sm),
          (PSHUFDmi addr:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Special binary v4i32 shuffle cases with SHUFPS.
def : Pat<(vector_shuffle (v4i32 VR128:$src1), (v4i32 VR128:$src2),
           PSHUFD_binary_shuffle_mask:$sm),
          (SHUFPSrri VR128:$src1, VR128:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
def : Pat<(vector_shuffle (v4i32 VR128:$src1),
           (bc_v4i32 (memopv2i64 addr:$src2)), PSHUFD_binary_shuffle_mask:$sm),
          (SHUFPSrmi VR128:$src1, addr:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;

// vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
let AddedComplexity = 10 in {
def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
                  UNPCKL_v_undef_shuffle_mask)),
          (UNPCKLPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
                  UNPCKL_v_undef_shuffle_mask)),
          (PUNPCKLBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
                  UNPCKL_v_undef_shuffle_mask)),
          (PUNPCKLWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                  UNPCKL_v_undef_shuffle_mask)),
          (PUNPCKLDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}

// vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
let AddedComplexity = 10 in {
def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
                  UNPCKH_v_undef_shuffle_mask)),
          (UNPCKHPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
                  UNPCKH_v_undef_shuffle_mask)),
          (PUNPCKHBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
                  UNPCKH_v_undef_shuffle_mask)),
          (PUNPCKHWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                  UNPCKH_v_undef_shuffle_mask)),
          (PUNPCKHDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}

let AddedComplexity = 15 in {
// vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVHP_shuffle_mask)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;

// vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVHLPS_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;

// vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
                  MOVHLPS_v_undef_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src1)>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1, (undef),
                  MOVHLPS_v_undef_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src1)>;
}

let AddedComplexity = 20 in {
// vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
// vector_shuffle v1, (load v2) <0, 1, 4, 5> using MOVHPS
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memopv4f32 addr:$src2),
                  MOVLP_shuffle_mask)),
          (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memopv2f64 addr:$src2),
                  MOVLP_shuffle_mask)),
          (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memopv4f32 addr:$src2),
                  MOVHP_shuffle_mask)),
          (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memopv2f64 addr:$src2),
                  MOVHP_shuffle_mask)),
          (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;

def : Pat<(v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)),
                  MOVLP_shuffle_mask)),
          (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memopv2i64 addr:$src2),
                  MOVLP_shuffle_mask)),
          (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)),
                  MOVHP_shuffle_mask)),
          (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memopv2i64 addr:$src2),
                  MOVHP_shuffle_mask)),
          (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
}

let AddedComplexity = 15 in {
// Setting the lowest element in the vector.
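// MOVL_shuffle_mask takes the low element from $src2 and the remaining
// elements from $src1; the register-to-register form of MOVLPD used here is
// the MOVSD encoding (see the comment below).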
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVL_shuffle_mask)),
          (MOVLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVL_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;

// vector_shuffle v1, v2 <4, 5, 2, 3> using MOVLPDrr (movsd)
def : Pat<(v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVLP_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVLP_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
}

// Set lowest element and zero upper elements.
let AddedComplexity = 20 in
def : Pat<(bc_v2i64 (vector_shuffle immAllZerosV,
                     (v2f64 (scalar_to_vector (loadf64 addr:$src))),
                     MOVL_shuffle_mask)),
          (MOVZQI2PQIrm addr:$src)>, Requires<[HasSSE2]>;
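// MOVQ loads 64 bits from memory and zero-fills bits 127:64, which is exactly
// the shuffle of a scalar load with the all-zeros vector matched above.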

// FIXME: Temporary workaround since 2-wide shuffle is broken.
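// These map the SSE2 intrinsics straight to their instructions, bypassing the
// (currently broken) 2-wide vector_shuffle lowering.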
def : Pat<(int_x86_sse2_movs_d VR128:$src1, VR128:$src2),
          (v2f64 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_loadh_pd VR128:$src1, addr:$src2),
          (v2f64 (MOVHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_loadl_pd VR128:$src1, addr:$src2),
          (v2f64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, VR128:$src2, imm:$src3),
          (v2f64 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$src3))>,
      Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, (load addr:$src2), imm:$src3),
          (v2f64 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$src3))>,
      Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, VR128:$src2),
          (v2f64 (UNPCKHPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, (load addr:$src2)),
          (v2f64 (UNPCKHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, VR128:$src2),
          (v2f64 (UNPCKLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, (load addr:$src2)),
          (v2f64 (UNPCKLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, VR128:$src2),
          (v2i64 (PUNPCKHQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, (load addr:$src2)),
          (v2i64 (PUNPCKHQDQrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, VR128:$src2),
          (v2i64 (PUNPCKLQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, (load addr:$src2)),
          (v2i64 (PUNPCKLQDQrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;

// Some special case pandn patterns.
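// PANDN computes ~$src1 & $src2.  The DAG form below, (and (xor $src1,
// all-ones), $src2), is the same expression, since xor with all-ones is a
// bitwise NOT; the three variants differ only in how the all-ones vector is
// typed before the bitcast to v2i64.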
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;

def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
                  (memopv2i64 addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
                  (memopv2i64 addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
                  (memopv2i64 addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;

// Use movaps / movups for SSE integer load / store (one byte shorter).
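// MOVAPS is encoded as 0F 28 while MOVDQA is 66 0F 6F; dropping the 66 prefix
// saves one byte.  Likewise MOVUPS (0F 10) vs. MOVDQU (F3 0F 6F).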
def : Pat<(alignedloadv4i32 addr:$src),
          (MOVAPSrm addr:$src)>, Requires<[HasSSE1]>;
def : Pat<(loadv4i32 addr:$src),
          (MOVUPSrm addr:$src)>, Requires<[HasSSE1]>;
def : Pat<(alignedloadv2i64 addr:$src),
          (MOVAPSrm addr:$src)>, Requires<[HasSSE2]>;
def : Pat<(loadv2i64 addr:$src),
          (MOVUPSrm addr:$src)>, Requires<[HasSSE2]>;

def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v2i64 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v4i32 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v8i16 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v16i8 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;