1 //====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
17 //===----------------------------------------------------------------------===//
18 // SSE specific DAG Nodes.
19 //===----------------------------------------------------------------------===//
21 def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
22 SDTCisFP<0>, SDTCisInt<2> ]>;
23 def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
24 SDTCisFP<1>, SDTCisVT<3, i8>]>;
26 def X86fmin : SDNode<"X86ISD::FMIN", SDTFPBinOp>;
27 def X86fmax : SDNode<"X86ISD::FMAX", SDTFPBinOp>;
28 def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp,
29 [SDNPCommutative, SDNPAssociative]>;
30 def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp,
31 [SDNPCommutative, SDNPAssociative]>;
32 def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp,
33 [SDNPCommutative, SDNPAssociative]>;
34 def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
35 def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
36 def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
37 def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
38 def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
39 def X86pshufb : SDNode<"X86ISD::PSHUFB",
40 SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
41 SDTCisSameAs<0,2>]>>;
42 def X86pextrb : SDNode<"X86ISD::PEXTRB",
43 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
44 def X86pextrw : SDNode<"X86ISD::PEXTRW",
45 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
46 def X86pinsrb : SDNode<"X86ISD::PINSRB",
47 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
48 SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
49 def X86pinsrw : SDNode<"X86ISD::PINSRW",
50 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
51 SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
52 def X86insrtps : SDNode<"X86ISD::INSERTPS",
53 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
54 SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
55 def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
56 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
57 def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
58 [SDNPHasChain, SDNPMayLoad]>;
59 def X86vshl : SDNode<"X86ISD::VSHL", SDTIntShiftOp>;
60 def X86vshr : SDNode<"X86ISD::VSRL", SDTIntShiftOp>;
61 def X86cmpps : SDNode<"X86ISD::CMPPS", SDTX86VFCMP>;
62 def X86cmppd : SDNode<"X86ISD::CMPPD", SDTX86VFCMP>;
63 def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
64 def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
65 def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
66 def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
67 def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
68 def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
69 def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
70 def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;
72 def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
73 SDTCisVT<1, v4f32>,
74 SDTCisVT<2, v4f32>]>;
75 def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
77 //===----------------------------------------------------------------------===//
78 // SSE Complex Patterns
79 //===----------------------------------------------------------------------===//
81 // These are 'extloads' from a scalar to the low element of a vector, zeroing
82 // the top elements. These are used for the SSE 'ss' and 'sd' instruction
83 // forms.
84 def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
85 [SDNPHasChain, SDNPMayLoad]>;
86 def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
87 [SDNPHasChain, SDNPMayLoad]>;
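// Illustrative sketch only (not part of the instruction definitions): an
// intrinsic 'ss' memory form typically folds one of these complex patterns as
// the load operand of its selection pattern, roughly:
//   def rm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
//                    (ins VR128:$src1, ssmem:$src2), asm,
//                    [(set VR128:$dst,
//                       (int_x86_sse_add_ss VR128:$src1, sse_load_f32:$src2))]>;
// The opcode, asm string, and choice of int_x86_sse_add_ss here are assumptions
// chosen only to show where sse_load_f32 appears.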
89 def ssmem : Operand<v4f32> {
90 let PrintMethod = "printf32mem";
91 let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
92 let ParserMatchClass = X86MemAsmOperand;
93 }
94 def sdmem : Operand<v2f64> {
95 let PrintMethod = "printf64mem";
96 let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
97 let ParserMatchClass = X86MemAsmOperand;
98 }
100 //===----------------------------------------------------------------------===//
101 // SSE pattern fragments
102 //===----------------------------------------------------------------------===//
104 def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
105 def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
106 def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
107 def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
109 // FIXME: move this to a more appropriate place after all AVX is done.
110 def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
111 def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
112 def loadv8i32 : PatFrag<(ops node:$ptr), (v8i32 (load node:$ptr))>;
113 def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;
115 // Like 'store', but always requires vector alignment.
116 def alignedstore : PatFrag<(ops node:$val, node:$ptr),
117 (store node:$val, node:$ptr), [{
118 return cast<StoreSDNode>(N)->getAlignment() >= 16;
119 }]>;
121 // Like 'load', but always requires vector alignment.
122 def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
123 return cast<LoadSDNode>(N)->getAlignment() >= 16;
124 }]>;
126 def alignedloadfsf32 : PatFrag<(ops node:$ptr),
127 (f32 (alignedload node:$ptr))>;
128 def alignedloadfsf64 : PatFrag<(ops node:$ptr),
129 (f64 (alignedload node:$ptr))>;
130 def alignedloadv4f32 : PatFrag<(ops node:$ptr),
131 (v4f32 (alignedload node:$ptr))>;
132 def alignedloadv2f64 : PatFrag<(ops node:$ptr),
133 (v2f64 (alignedload node:$ptr))>;
134 def alignedloadv4i32 : PatFrag<(ops node:$ptr),
135 (v4i32 (alignedload node:$ptr))>;
136 def alignedloadv2i64 : PatFrag<(ops node:$ptr),
137 (v2i64 (alignedload node:$ptr))>;
139 // FIXME: move this to a more appropriate place after all AVX is done.
140 def alignedloadv8f32 : PatFrag<(ops node:$ptr),
141 (v8f32 (alignedload node:$ptr))>;
142 def alignedloadv4f64 : PatFrag<(ops node:$ptr),
143 (v4f64 (alignedload node:$ptr))>;
144 def alignedloadv8i32 : PatFrag<(ops node:$ptr),
145 (v8i32 (alignedload node:$ptr))>;
146 def alignedloadv4i64 : PatFrag<(ops node:$ptr),
147 (v4i64 (alignedload node:$ptr))>;
149 // Like 'load', but uses special alignment checks suitable for use in
150 // memory operands in most SSE instructions, which are required to
151 // be naturally aligned on some targets but not on others. If the subtarget
152 // allows unaligned accesses, match any load, though this may require
153 // setting a feature bit in the processor (on startup, for example).
154 // Opteron 10h and later implement such a feature.
155 def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
156 return Subtarget->hasVectorUAMem()
157 || cast<LoadSDNode>(N)->getAlignment() >= 16;
158 }]>;
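// For example (illustrative): a 16-byte aligned (v4f32 (load addr)) matches
// memopv4f32 below and can be folded into the memory form of a packed
// instruction such as ADDPS, while an unaligned load only matches when the
// subtarget reports hasVectorUAMem() and otherwise stays a separate
// unaligned move (MOVUPS) feeding the register form.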
160 def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
161 def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
162 def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
163 def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
164 def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
165 def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
166 def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
168 // FIXME: move this to a more appropriate place after all AVX is done.
169 def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
170 def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
172 // SSSE3 uses MMX registers for some instructions. They aren't aligned on a
173 // certain boundary.
174 // FIXME: 8 byte alignment for mmx reads is not required
175 def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
176 return cast<LoadSDNode>(N)->getAlignment() >= 8;
177 }]>;
179 def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;
180 def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
181 def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
182 def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;
185 // Like 'store', but requires the non-temporal bit to be set
186 def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
187 (st node:$val, node:$ptr), [{
188 if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
189 return ST->isNonTemporal();
190 return false;
191 }]>;
193 def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
194 (st node:$val, node:$ptr), [{
195 if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
196 return ST->isNonTemporal() && !ST->isTruncatingStore() &&
197 ST->getAddressingMode() == ISD::UNINDEXED &&
198 ST->getAlignment() >= 16;
199 return false;
200 }]>;
202 def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
203 (st node:$val, node:$ptr), [{
204 if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
205 return ST->isNonTemporal() &&
206 ST->getAlignment() < 16;
207 return false;
208 }]>;
210 def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
211 def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
212 def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
213 def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
214 def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
215 def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
217 def vzmovl_v2i64 : PatFrag<(ops node:$src),
218 (bitconvert (v2i64 (X86vzmovl
219 (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
220 def vzmovl_v4i32 : PatFrag<(ops node:$src),
221 (bitconvert (v4i32 (X86vzmovl
222 (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;
224 def vzload_v2i64 : PatFrag<(ops node:$src),
225 (bitconvert (v2i64 (X86vzload node:$src)))>;
228 def fp32imm0 : PatLeaf<(f32 fpimm), [{
229 return N->isExactlyValue(+0.0);
230 }]>;
232 // BYTE_imm - Transform bit immediates into byte immediates.
233 def BYTE_imm : SDNodeXForm<imm, [{
234 // Transformation function: imm >> 3
235 return getI32Imm(N->getZExtValue() >> 3);
236 }]>;
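// Worked example (illustrative): the psll_dq/psrl_dq intrinsics express the
// shift amount in bits while PSLLDQ/PSRLDQ encode it in bytes, so an
// immediate of 32 (bits) becomes 32 >> 3 == 4 (bytes) after BYTE_imm.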
238 // SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
239 // SHUFP* etc. imm.
240 def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
241 return getI8Imm(X86::getShuffleSHUFImmediate(N));
242 }]>;
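// Worked example (illustrative): a v4i32 shuffle mask <3,2,1,0> (reverse the
// elements) yields the immediate 0b00011011 == 0x1B: two bits per result
// element select the source element, with result element 0 in the low bits.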
244 // SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
245 // PSHUFHW imm.
246 def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
247 return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
248 }]>;
250 // SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
251 // PSHUFLW imm.
252 def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
253 return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
254 }]>;
256 // SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
257 // PALIGNR imm.
258 def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
259 return getI8Imm(X86::getShufflePALIGNRImmediate(N));
260 }]>;
262 def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
263 (vector_shuffle node:$lhs, node:$rhs), [{
264 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
265 return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
266 }]>;
268 def movddup : PatFrag<(ops node:$lhs, node:$rhs),
269 (vector_shuffle node:$lhs, node:$rhs), [{
270 return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
271 }]>;
273 def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
274 (vector_shuffle node:$lhs, node:$rhs), [{
275 return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
276 }]>;
278 def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
279 (vector_shuffle node:$lhs, node:$rhs), [{
280 return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
281 }]>;
283 def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
284 (vector_shuffle node:$lhs, node:$rhs), [{
285 return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
286 }]>;
288 def movlp : PatFrag<(ops node:$lhs, node:$rhs),
289 (vector_shuffle node:$lhs, node:$rhs), [{
290 return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
291 }]>;
293 def movl : PatFrag<(ops node:$lhs, node:$rhs),
294 (vector_shuffle node:$lhs, node:$rhs), [{
295 return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
296 }]>;
298 def movshdup : PatFrag<(ops node:$lhs, node:$rhs),
299 (vector_shuffle node:$lhs, node:$rhs), [{
300 return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
301 }]>;
303 def movsldup : PatFrag<(ops node:$lhs, node:$rhs),
304 (vector_shuffle node:$lhs, node:$rhs), [{
305 return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
306 }]>;
308 def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
309 (vector_shuffle node:$lhs, node:$rhs), [{
310 return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
311 }]>;
313 def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
314 (vector_shuffle node:$lhs, node:$rhs), [{
315 return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
316 }]>;
318 def unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
319 (vector_shuffle node:$lhs, node:$rhs), [{
320 return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
321 }]>;
323 def unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
324 (vector_shuffle node:$lhs, node:$rhs), [{
325 return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
326 }]>;
328 def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
329 (vector_shuffle node:$lhs, node:$rhs), [{
330 return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
331 }], SHUFFLE_get_shuf_imm>;
333 def shufp : PatFrag<(ops node:$lhs, node:$rhs),
334 (vector_shuffle node:$lhs, node:$rhs), [{
335 return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
336 }], SHUFFLE_get_shuf_imm>;
338 def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
339 (vector_shuffle node:$lhs, node:$rhs), [{
340 return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
341 }], SHUFFLE_get_pshufhw_imm>;
343 def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
344 (vector_shuffle node:$lhs, node:$rhs), [{
345 return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
346 }], SHUFFLE_get_pshuflw_imm>;
348 def palign : PatFrag<(ops node:$lhs, node:$rhs),
349 (vector_shuffle node:$lhs, node:$rhs), [{
350 return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
351 }], SHUFFLE_get_palign_imm>;
353 //===----------------------------------------------------------------------===//
354 // SSE scalar FP Instructions
355 //===----------------------------------------------------------------------===//
357 // CMOV* - Used to implement the SSE SELECT DAG operation. Expanded after
358 // instruction selection into a branch sequence.
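// Sketch of that expansion (an assumption about the usual custom-inserter
// diamond, block names illustrative only): a conditional branch on EFLAGS
// selects between two blocks and a PHI merges the two inputs, roughly
//   thisMBB:   conditional branch on EFLAGS to copy1MBB
//   copy0MBB:  fall-through block providing $f
//   copy1MBB:  block providing $t
//   sinkMBB:   $dst = PHI [$f, copy0MBB], [$t, copy1MBB]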
359 let Uses = [EFLAGS], usesCustomInserter = 1 in {
360 def CMOV_FR32 : I<0, Pseudo,
361 (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
362 "#CMOV_FR32 PSEUDO!",
363 [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
364 EFLAGS))]>;
365 def CMOV_FR64 : I<0, Pseudo,
366 (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
367 "#CMOV_FR64 PSEUDO!",
368 [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
369 EFLAGS))]>;
370 def CMOV_V4F32 : I<0, Pseudo,
371 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
372 "#CMOV_V4F32 PSEUDO!",
373 [(set VR128:$dst,
374 (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
375 EFLAGS)))]>;
376 def CMOV_V2F64 : I<0, Pseudo,
377 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
378 "#CMOV_V2F64 PSEUDO!",
379 [(set VR128:$dst,
380 (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
381 EFLAGS)))]>;
382 def CMOV_V2I64 : I<0, Pseudo,
383 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
384 "#CMOV_V2I64 PSEUDO!",
385 [(set VR128:$dst,
386 (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
387 EFLAGS)))]>;
388 }
390 //===----------------------------------------------------------------------===//
391 // SSE 1 & 2 Instructions Classes
392 //===----------------------------------------------------------------------===//
394 /// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
395 multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
396 RegisterClass RC, X86MemOperand x86memop> {
397 let isCommutable = 1 in {
398 def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
399 OpcodeStr, [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
400 }
401 def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
402 OpcodeStr, [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
403 }
405 /// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
406 multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
407 string asm, string SSEVer, string FPSizeStr,
408 Operand memopr, ComplexPattern mem_cpat> {
409 def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
410 asm, [(set RC:$dst, (
411 !nameconcat<Intrinsic>("int_x86_sse",
412 !strconcat(SSEVer, !strconcat("_",
413 !strconcat(OpcodeStr, FPSizeStr))))
414 RC:$src1, RC:$src2))]>;
415 def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
416 asm, [(set RC:$dst, (
417 !nameconcat<Intrinsic>("int_x86_sse",
418 !strconcat(SSEVer, !strconcat("_",
419 !strconcat(OpcodeStr, FPSizeStr))))
420 RC:$src1, mem_cpat:$src2))]>;
421 }
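// Example of the intrinsic-name construction above (illustrative): with
// SSEVer "2", OpcodeStr "add" and FPSizeStr "_sd", the nested !strconcat
// calls produce "2_add_sd" and !nameconcat prepends "int_x86_sse", so the
// referenced intrinsic record is int_x86_sse2_add_sd.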
423 /// sse12_fp_packed - SSE 1 & 2 packed instructions class
424 multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
425 RegisterClass RC, ValueType vt,
426 X86MemOperand x86memop, PatFrag mem_frag,
427 Domain d, bit MayLoad = 0> {
428 let isCommutable = 1 in
429 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
430 OpcodeStr, [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))],d>;
431 let mayLoad = MayLoad in
432 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
433 OpcodeStr, [(set RC:$dst, (OpNode RC:$src1,
434 (mem_frag addr:$src2)))],d>;
435 }
437 /// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
438 multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
439 string OpcodeStr, X86MemOperand x86memop,
440 list<dag> pat_rr, list<dag> pat_rm> {
441 let isCommutable = 1 in
442 def rr : PI<opc, MRMSrcReg, (outs RC:$dst),
443 (ins RC:$src1, RC:$src2), OpcodeStr, pat_rr, d>;
444 def rm : PI<opc, MRMSrcMem, (outs RC:$dst),
445 (ins RC:$src1, x86memop:$src2), OpcodeStr, pat_rm, d>;
446 }
448 /// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
449 multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
450 string asm, string SSEVer, string FPSizeStr,
451 X86MemOperand x86memop, PatFrag mem_frag,
452 Domain d> {
453 def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
454 asm, [(set RC:$dst, (
455 !nameconcat<Intrinsic>("int_x86_sse",
456 !strconcat(SSEVer, !strconcat("_",
457 !strconcat(OpcodeStr, FPSizeStr))))
458 RC:$src1, RC:$src2))], d>;
459 def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
460 asm, [(set RC:$dst, (
461 !nameconcat<Intrinsic>("int_x86_sse",
462 !strconcat(SSEVer, !strconcat("_",
463 !strconcat(OpcodeStr, FPSizeStr))))
464 RC:$src1, (mem_frag addr:$src2)))], d>;
465 }
467 //===----------------------------------------------------------------------===//
468 // SSE 1 & 2 - Move Instructions
469 //===----------------------------------------------------------------------===//
471 class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
472 SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
473 [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;
475 // Loading from memory automatically zeroes the upper bits.
476 class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
477 PatFrag mem_pat, string OpcodeStr> :
478 SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
479 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
480 [(set RC:$dst, (mem_pat addr:$src))]>;
482 // Move Instructions. Register-to-register movss/movsd is not used for FR32/64
483 // register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
484 // is used instead. Register-to-register movss/movsd is not modeled as an
485 // INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
486 // in terms of a copy, and, as just mentioned, we don't use movss/movsd for copies.
487 let isAsmParserOnly = 1 in {
488 def VMOVSSrr : sse12_move_rr<FR32, v4f32,
489 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
490 def VMOVSDrr : sse12_move_rr<FR64, v2f64,
491 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;
493 let canFoldAsLoad = 1, isReMaterializable = 1 in {
494 def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;
496 let AddedComplexity = 20 in
497 def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
501 let Constraints = "$src1 = $dst" in {
502 def MOVSSrr : sse12_move_rr<FR32, v4f32,
503 "movss\t{$src2, $dst|$dst, $src2}">, XS;
504 def MOVSDrr : sse12_move_rr<FR64, v2f64,
505 "movsd\t{$src2, $dst|$dst, $src2}">, XD;
508 let canFoldAsLoad = 1, isReMaterializable = 1 in {
509 def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;
511 let AddedComplexity = 20 in
512 def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
515 let AddedComplexity = 15 in {
516 // Extract the low 32-bit value from one vector and insert it into another.
517 def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
518 (MOVSSrr (v4f32 VR128:$src1),
519 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
520 // Extract the low 64-bit value from one vector and insert it into another.
521 def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
522 (MOVSDrr (v2f64 VR128:$src1),
523 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
526 // Implicitly promote a 32-bit scalar to a vector.
527 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
528 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
529 // Implicitly promote a 64-bit scalar to a vector.
530 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
531 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
533 let AddedComplexity = 20 in {
534 // MOVSSrm zeros the high parts of the register; represent this
535 // with SUBREG_TO_REG.
536 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
537 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
538 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
539 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
540 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
541 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
542 // MOVSDrm zeros the high parts of the register; represent this
543 // with SUBREG_TO_REG.
544 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
545 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
546 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
547 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
548 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
549 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
550 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
551 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
552 def : Pat<(v2f64 (X86vzload addr:$src)),
553 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
556 // Store scalar value to memory.
557 def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
558 "movss\t{$src, $dst|$dst, $src}",
559 [(store FR32:$src, addr:$dst)]>;
560 def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
561 "movsd\t{$src, $dst|$dst, $src}",
562 [(store FR64:$src, addr:$dst)]>;
564 let isAsmParserOnly = 1 in {
565 def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
566 "movss\t{$src, $dst|$dst, $src}",
567 [(store FR32:$src, addr:$dst)]>, XS, VEX_4V;
568 def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
569 "movsd\t{$src, $dst|$dst, $src}",
570 [(store FR64:$src, addr:$dst)]>, XD, VEX_4V;
573 // Extract and store.
574 def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
577 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
578 def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
581 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
583 // Move Aligned/Unaligned floating point values
584 multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
585 X86MemOperand x86memop, PatFrag ld_frag,
586 string asm, Domain d,
587 bit IsReMaterializable = 1> {
588 let neverHasSideEffects = 1 in
589 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
590 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
591 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
592 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
593 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
594 [(set RC:$dst, (ld_frag addr:$src))], d>;
597 let isAsmParserOnly = 1 in {
598 defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
599 "movaps", SSEPackedSingle>, VEX;
600 defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
601 "movapd", SSEPackedDouble>, OpSize, VEX;
602 defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
603 "movups", SSEPackedSingle>, VEX;
604 defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
605 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
607 defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
608 "movaps", SSEPackedSingle>, VEX;
609 defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
610 "movapd", SSEPackedDouble>, OpSize, VEX;
611 defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
612 "movups", SSEPackedSingle>, VEX;
613 defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
614 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
616 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
617 "movaps", SSEPackedSingle>, TB;
618 defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
619 "movapd", SSEPackedDouble>, TB, OpSize;
620 defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
621 "movups", SSEPackedSingle>, TB;
622 defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
623 "movupd", SSEPackedDouble, 0>, TB, OpSize;
625 let isAsmParserOnly = 1 in {
626 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
627 "movaps\t{$src, $dst|$dst, $src}",
628 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
629 def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
630 "movapd\t{$src, $dst|$dst, $src}",
631 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
632 def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
633 "movups\t{$src, $dst|$dst, $src}",
634 [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
635 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
636 "movupd\t{$src, $dst|$dst, $src}",
637 [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
638 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
639 "movaps\t{$src, $dst|$dst, $src}",
640 [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
641 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
642 "movapd\t{$src, $dst|$dst, $src}",
643 [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
644 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
645 "movups\t{$src, $dst|$dst, $src}",
646 [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
647 def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
648 "movupd\t{$src, $dst|$dst, $src}",
649 [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
651 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
652 "movaps\t{$src, $dst|$dst, $src}",
653 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
654 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
655 "movapd\t{$src, $dst|$dst, $src}",
656 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
657 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
658 "movups\t{$src, $dst|$dst, $src}",
659 [(store (v4f32 VR128:$src), addr:$dst)]>;
660 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
661 "movupd\t{$src, $dst|$dst, $src}",
662 [(store (v2f64 VR128:$src), addr:$dst)]>;
664 // Intrinsic forms of MOVUPS/D load and store
665 let isAsmParserOnly = 1 in {
666 let canFoldAsLoad = 1, isReMaterializable = 1 in
667 def VMOVUPSrm_Int : VPSI<0x10, MRMSrcMem, (outs VR128:$dst),
669 "movups\t{$src, $dst|$dst, $src}",
670 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>, VEX;
671 def VMOVUPDrm_Int : VPDI<0x10, MRMSrcMem, (outs VR128:$dst),
673 "movupd\t{$src, $dst|$dst, $src}",
674 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>, VEX;
675 def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
676 (ins f128mem:$dst, VR128:$src),
677 "movups\t{$src, $dst|$dst, $src}",
678 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
679 def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
680 (ins f128mem:$dst, VR128:$src),
681 "movupd\t{$src, $dst|$dst, $src}",
682 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
684 let canFoldAsLoad = 1, isReMaterializable = 1 in
685 def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
686 "movups\t{$src, $dst|$dst, $src}",
687 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
688 def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
689 "movupd\t{$src, $dst|$dst, $src}",
690 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
692 def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
693 "movups\t{$src, $dst|$dst, $src}",
694 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
695 def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
696 "movupd\t{$src, $dst|$dst, $src}",
697 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
699 // Move Low/High packed floating point values
700 multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
701 PatFrag mov_frag, string base_opc,
703 def PSrm : PI<opc, MRMSrcMem,
704 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
705 !strconcat(!strconcat(base_opc,"s"), asm_opr),
708 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
709 SSEPackedSingle>, TB;
711 def PDrm : PI<opc, MRMSrcMem,
712 (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
713 !strconcat(!strconcat(base_opc,"d"), asm_opr),
714 [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
715 (scalar_to_vector (loadf64 addr:$src2)))))],
716 SSEPackedDouble>, TB, OpSize;
719 let isAsmParserOnly = 1, AddedComplexity = 20 in {
720 defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
721 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
722 defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
723 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
725 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
726 defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
727 "\t{$src2, $dst|$dst, $src2}">;
728 defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
729 "\t{$src2, $dst|$dst, $src2}">;
732 let isAsmParserOnly = 1 in {
733 def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
734 "movlps\t{$src, $dst|$dst, $src}",
735 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
736 (iPTR 0))), addr:$dst)]>, VEX;
737 def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
738 "movlpd\t{$src, $dst|$dst, $src}",
739 [(store (f64 (vector_extract (v2f64 VR128:$src),
740 (iPTR 0))), addr:$dst)]>, VEX;
742 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
743 "movlps\t{$src, $dst|$dst, $src}",
744 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
745 (iPTR 0))), addr:$dst)]>;
746 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
747 "movlpd\t{$src, $dst|$dst, $src}",
748 [(store (f64 (vector_extract (v2f64 VR128:$src),
749 (iPTR 0))), addr:$dst)]>;
751 // v2f64 extract element 1 is always custom lowered to unpack high to low
752 // and extract element 0 so the non-store version isn't too horrible.
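// For the register (non-store) case this means the extract is selected
// roughly as (illustrative)
//   (f64 (vector_extract (unpckh VR128:$src, (undef)), (iPTR 0)))
// i.e. an UNPCKHPD bringing the high half down to element 0 followed by an
// element-0 extract, while the MOVHP* store patterns below fold that high
// half directly into the store.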
753 let isAsmParserOnly = 1 in {
754 def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
755 "movhps\t{$src, $dst|$dst, $src}",
756 [(store (f64 (vector_extract
757 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
758 (undef)), (iPTR 0))), addr:$dst)]>,
760 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
761 "movhpd\t{$src, $dst|$dst, $src}",
762 [(store (f64 (vector_extract
763 (v2f64 (unpckh VR128:$src, (undef))),
764 (iPTR 0))), addr:$dst)]>,
767 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
768 "movhps\t{$src, $dst|$dst, $src}",
769 [(store (f64 (vector_extract
770 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
771 (undef)), (iPTR 0))), addr:$dst)]>;
772 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
773 "movhpd\t{$src, $dst|$dst, $src}",
774 [(store (f64 (vector_extract
775 (v2f64 (unpckh VR128:$src, (undef))),
776 (iPTR 0))), addr:$dst)]>;
778 let isAsmParserOnly = 1, AddedComplexity = 20 in {
779 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
780 (ins VR128:$src1, VR128:$src2),
781 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
783 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
785 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
786 (ins VR128:$src1, VR128:$src2),
787 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
789 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
792 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
793 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
794 (ins VR128:$src1, VR128:$src2),
795 "movlhps\t{$src2, $dst|$dst, $src2}",
797 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
798 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
799 (ins VR128:$src1, VR128:$src2),
800 "movhlps\t{$src2, $dst|$dst, $src2}",
802 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
805 def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
806 (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
807 let AddedComplexity = 20 in {
808 def : Pat<(v4f32 (movddup VR128:$src, (undef))),
809 (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
810 def : Pat<(v2i64 (movddup VR128:$src, (undef))),
811 (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
814 //===----------------------------------------------------------------------===//
815 // SSE 1 & 2 - Conversion Instructions
816 //===----------------------------------------------------------------------===//
818 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
819 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
821 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
822 [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
823 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
824 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
827 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
828 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
829 string asm, Domain d> {
830 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
831 [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
832 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
833 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
836 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
837 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
839 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
841 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
842 (ins DstRC:$src1, x86memop:$src), asm, []>;
845 let isAsmParserOnly = 1 in {
846 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
847 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
848 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
849 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
850 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
851 "cvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}">, XS,
853 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
854 "cvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}">, XD,
858 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
859 "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
860 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
861 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
862 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
863 "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
864 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
865 "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
867 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
868 // and/or XMM operand(s).
869 multiclass sse12_cvt_pint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
870 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
871 string asm, Domain d> {
872 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
873 [(set DstRC:$dst, (Int SrcRC:$src))], d>;
874 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
875 [(set DstRC:$dst, (Int (ld_frag addr:$src)))], d>;
878 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
879 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
881 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
882 [(set DstRC:$dst, (Int SrcRC:$src))]>;
883 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
884 [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
887 multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC,
888 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
889 PatFrag ld_frag, string asm, Domain d> {
890 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
891 asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))], d>;
892 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst),
893 (ins DstRC:$src1, x86memop:$src2), asm,
894 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))], d>;
897 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
898 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
899 PatFrag ld_frag, string asm> {
900 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
901 asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
902 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
903 (ins DstRC:$src1, x86memop:$src2), asm,
904 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
907 let isAsmParserOnly = 1 in {
908 defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
909 f32mem, load, "cvtss2si\t{$src, $dst|$dst, $src}">, XS,
911 defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
912 f128mem, load, "cvtsd2si\t{$src, $dst|$dst, $src}">, XD,
915 defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
916 f32mem, load, "cvtss2si\t{$src, $dst|$dst, $src}">, XS;
917 defm Int_CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
918 f128mem, load, "cvtsd2si\t{$src, $dst|$dst, $src}">, XD;
921 let Constraints = "$src1 = $dst" in {
922 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
923 int_x86_sse_cvtsi2ss, i32mem, loadi32,
924 "cvtsi2ss\t{$src2, $dst|$dst, $src2}">, XS;
925 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
926 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
927 "cvtsi2ss\t{$src2, $dst|$dst, $src2}">, XD;
930 // Instructions below don't have an AVX form.
931 defm Int_CVTPS2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi,
932 f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}",
933 SSEPackedSingle>, TB;
934 defm Int_CVTPD2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtpd2pi,
935 f128mem, memop, "cvtpd2pi\t{$src, $dst|$dst, $src}",
936 SSEPackedDouble>, TB, OpSize;
937 defm Int_CVTTPS2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttps2pi,
938 f64mem, load, "cvttps2pi\t{$src, $dst|$dst, $src}",
939 SSEPackedSingle>, TB;
940 defm Int_CVTTPD2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttpd2pi,
941 f128mem, memop, "cvttpd2pi\t{$src, $dst|$dst, $src}",
942 SSEPackedDouble>, TB, OpSize;
943 defm Int_CVTPI2PD : sse12_cvt_pint<0x2A, VR64, VR128, int_x86_sse_cvtpi2pd,
944 i64mem, load, "cvtpi2pd\t{$src, $dst|$dst, $src}",
945 SSEPackedDouble>, TB, OpSize;
946 let Constraints = "$src1 = $dst" in {
947 defm Int_CVTPI2PS : sse12_cvt_pint_3addr<0x2A, VR64, VR128,
948 int_x86_sse_cvtpi2ps,
949 i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
950 SSEPackedSingle>, TB;
955 // Aliases for intrinsics
956 let isAsmParserOnly = 1, Pattern = []<dag> in {
957 defm Int_VCVTTSS2SI : sse12_cvt_sint_3addr<0x2C, VR128, GR32,
958 int_x86_sse_cvttss2si, f32mem, load,
959 "cvttss2si\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS;
960 defm Int_VCVTTSD2SI : sse12_cvt_sint_3addr<0x2C, VR128, GR32,
961 int_x86_sse2_cvttsd2si, f128mem, load,
962 "cvttss2si\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD;
964 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
965 f32mem, load, "cvttss2si\t{$src, $dst|$dst, $src}">,
967 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
968 f128mem, load, "cvttss2si\t{$src, $dst|$dst, $src}">,
971 let isAsmParserOnly = 1, Pattern = []<dag> in {
972 defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
973 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
974 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, f128mem, load,
975 "cvtdq2ps\t{$src, $dst|$dst, $src}",
976 SSEPackedSingle>, TB, VEX;
978 let Pattern = []<dag> in {
979 defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
980 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
981 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, f128mem, load /*dummy*/,
982 "cvtdq2ps\t{$src, $dst|$dst, $src}",
983 SSEPackedSingle>, TB; /* PD SSE3 form is available */
988 // Convert scalar double to scalar single
989 let isAsmParserOnly = 1 in {
990 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
991 (ins FR64:$src1, FR64:$src2),
992 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
994 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
995 (ins FR64:$src1, f64mem:$src2),
996 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
997 []>, XD, Requires<[HasAVX, HasSSE2, OptForSize]>, VEX_4V;
999 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
1000 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1001 [(set FR32:$dst, (fround FR64:$src))]>;
1002 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
1003 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1004 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
1005 Requires<[HasSSE2, OptForSize]>;
1007 let isAsmParserOnly = 1 in
1008 defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
1009 int_x86_sse2_cvtsd2ss, f64mem, load,
1010 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}">,
1012 let Constraints = "$src1 = $dst" in
1013 defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
1014 int_x86_sse2_cvtsd2ss, f64mem, load,
1015 "cvtsd2ss\t{$src2, $dst|$dst, $src2}">, XS;
1017 // Convert scalar single to scalar double
1018 let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
1019 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
1020 (ins FR32:$src1, FR32:$src2),
1021 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1022 []>, XS, Requires<[HasAVX, HasSSE2]>, VEX_4V;
1023 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
1024 (ins FR32:$src1, f32mem:$src2),
1025 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1026 []>, XS, VEX_4V, Requires<[HasAVX, HasSSE2, OptForSize]>;
1028 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
1029 "cvtss2sd\t{$src, $dst|$dst, $src}",
1030 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
1031 Requires<[HasSSE2]>;
1032 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
1033 "cvtss2sd\t{$src, $dst|$dst, $src}",
1034 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
1035 Requires<[HasSSE2, OptForSize]>;
1037 let isAsmParserOnly = 1 in {
1038 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
1039 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1040 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1041 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1042 VR128:$src2))]>, XS, VEX_4V,
1043 Requires<[HasAVX, HasSSE2]>;
1044 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
1045 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
1046 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1047 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1048 (load addr:$src2)))]>, XS, VEX_4V,
1049 Requires<[HasAVX, HasSSE2]>;
1051 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
1052 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
1053 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1054 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1055 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1056 VR128:$src2))]>, XS,
1057 Requires<[HasSSE2]>;
1058 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
1059 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
1060 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1061 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1062 (load addr:$src2)))]>, XS,
1063 Requires<[HasSSE2]>;
1066 def : Pat<(extloadf32 addr:$src),
1067 (CVTSS2SDrr (MOVSSrm addr:$src))>,
1068 Requires<[HasSSE2, OptForSpeed]>;
1070 // Convert doubleword to packed single/double fp
1071 let isAsmParserOnly = 1 in { // SSE2 instructions without OpSize prefix
1072 def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1073 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1074 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
1075 TB, VEX, Requires<[HasAVX, HasSSE2]>;
1076 def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1077 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1078 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
1079 (bitconvert (memopv2i64 addr:$src))))]>,
1080 TB, VEX, Requires<[HasAVX, HasSSE2]>;
1082 def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1083 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1084 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
1085 TB, Requires<[HasSSE2]>;
1086 def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1087 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1088 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
1089 (bitconvert (memopv2i64 addr:$src))))]>,
1090 TB, Requires<[HasSSE2]>;
1092 // FIXME: why is the non-intrinsic version described as SSE3?
1093 let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
1094 def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1095 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
1096 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
1097 XS, VEX, Requires<[HasAVX, HasSSE2]>;
1098 def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1099 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
1100 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
1101 (bitconvert (memopv2i64 addr:$src))))]>,
1102 XS, VEX, Requires<[HasAVX, HasSSE2]>;
1104 def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1105 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1106 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
1107 XS, Requires<[HasSSE2]>;
1108 def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1109 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1110 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
1111 (bitconvert (memopv2i64 addr:$src))))]>,
1112 XS, Requires<[HasSSE2]>;
1114 // Convert packed single/double fp to doubleword
1115 let isAsmParserOnly = 1 in {
1116 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1117 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1118 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1119 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1121 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1122 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
1123 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1124 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
1126 let isAsmParserOnly = 1 in {
1127 def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1128 "cvtps2dq\t{$src, $dst|$dst, $src}",
1129 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
1131 def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
1133 "cvtps2dq\t{$src, $dst|$dst, $src}",
1134 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
1135 (memop addr:$src)))]>, VEX;
1137 def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1138 "cvtps2dq\t{$src, $dst|$dst, $src}",
1139 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
1140 def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1141 "cvtps2dq\t{$src, $dst|$dst, $src}",
1142 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
1143 (memop addr:$src)))]>;
1145 let isAsmParserOnly = 1 in { // SSE2 packed instructions with XD prefix
1146 def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1147 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
1148 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1149 XD, VEX, Requires<[HasAVX, HasSSE2]>;
1150 def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1151 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
1152 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
1153 (memop addr:$src)))]>,
1154 XD, VEX, Requires<[HasAVX, HasSSE2]>;
1156 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1157 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1158 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1159 XD, Requires<[HasSSE2]>;
1160 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1161 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1162 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
1163 (memop addr:$src)))]>,
1164 XD, Requires<[HasSSE2]>;
1167 // Convert with truncation packed single/double fp to doubleword
1168 let isAsmParserOnly = 1 in { // SSE2 packed instructions with XS prefix
1169 def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1170 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1171 def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1172 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1174 def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1175 "cvttps2dq\t{$src, $dst|$dst, $src}", []>;
1176 def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1177 "cvttps2dq\t{$src, $dst|$dst, $src}", []>;
1180 let isAsmParserOnly = 1 in {
1181 def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1182 "vcvttps2dq\t{$src, $dst|$dst, $src}",
1184 (int_x86_sse2_cvttps2dq VR128:$src))]>,
1185 XS, VEX, Requires<[HasAVX, HasSSE2]>;
1186 def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1187 "vcvttps2dq\t{$src, $dst|$dst, $src}",
1188 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
1189 (memop addr:$src)))]>,
1190 XS, VEX, Requires<[HasAVX, HasSSE2]>;
1192 def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1193 "cvttps2dq\t{$src, $dst|$dst, $src}",
1195 (int_x86_sse2_cvttps2dq VR128:$src))]>,
1196 XS, Requires<[HasSSE2]>;
1197 def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1198 "cvttps2dq\t{$src, $dst|$dst, $src}",
1199 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
1200 (memop addr:$src)))]>,
1201 XS, Requires<[HasSSE2]>;
1203 let isAsmParserOnly = 1 in {
1204 def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
1206 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1207 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
1209 def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
1211 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1212 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1213 (memop addr:$src)))]>, VEX;
1215 def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1216 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1217 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
1218 def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
1219 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1220 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1221 (memop addr:$src)))]>;
1223 // Convert packed single to packed double
1224 let isAsmParserOnly = 1 in { // SSE2 instructions without OpSize prefix
1225 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1226 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX,
1228 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1229 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX,
1232 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1233 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
1234 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1235 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
1237 let isAsmParserOnly = 1 in {
1238 def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1239 "cvtps2pd\t{$src, $dst|$dst, $src}",
1240 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1241 VEX, Requires<[HasAVX, HasSSE2]>;
1242 def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1243 "cvtps2pd\t{$src, $dst|$dst, $src}",
1244 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1245 (load addr:$src)))]>,
1246 VEX, Requires<[HasAVX, HasSSE2]>;
1248 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1249 "cvtps2pd\t{$src, $dst|$dst, $src}",
1250 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1251 TB, Requires<[HasSSE2]>;
1252 def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1253 "cvtps2pd\t{$src, $dst|$dst, $src}",
1254 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1255 (load addr:$src)))]>,
1256 TB, Requires<[HasSSE2]>;
1258 // Convert packed double to packed single
1259 let isAsmParserOnly = 1 in {
1260 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1261 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1262 // FIXME: the memory form of this instruction should be described using
1263 // extra asm syntax.
1265 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1266 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1267 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1268 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1271 let isAsmParserOnly = 1 in {
1272 def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1273 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1274 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1275 def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
1277 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1278 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1279 (memop addr:$src)))]>;
1281 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1282 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1283 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1284 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1285 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1286 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1287 (memop addr:$src)))]>;
1289 //===----------------------------------------------------------------------===//
1290 // SSE 1 & 2 - Compare Instructions
1291 //===----------------------------------------------------------------------===//
1293 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
1294 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
1295 string asm, string asm_alt> {
1296 def rr : SIi8<0xC2, MRMSrcReg,
1297 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
1300 def rm : SIi8<0xC2, MRMSrcMem,
1301 (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
1303 // Accept explicit immediate argument form instead of comparison code.
1304 let isAsmParserOnly = 1 in {
1305 def rr_alt : SIi8<0xC2, MRMSrcReg,
1306 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1309 def rm_alt : SIi8<0xC2, MRMSrcMem,
1310 (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
1315 let neverHasSideEffects = 1, isAsmParserOnly = 1 in {
1316 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
1317 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1318 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1320 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
1321 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1322 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1326 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1327 defm CMPSS : sse12_cmp_scalar<FR32, f32mem,
1328 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
1329 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}">, XS;
1330 defm CMPSD : sse12_cmp_scalar<FR64, f64mem,
1331 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
1332 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}">, XD;
1335 multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
1336 Intrinsic Int, string asm> {
1337 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1338 (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
1339 [(set VR128:$dst, (Int VR128:$src1,
1340 VR128:$src, imm:$cc))]>;
1341 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1342 (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
1343 [(set VR128:$dst, (Int VR128:$src1,
1344 (load addr:$src), imm:$cc))]>;
1347 // Aliases to match intrinsics which expect XMM operand(s).
1348 let isAsmParserOnly = 1 in {
1349 defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1350 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
1352 defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1353 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
1356 let Constraints = "$src1 = $dst" in {
1357 defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1358 "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
1359 defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1360 "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
1364 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
1365 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1366 ValueType vt, X86MemOperand x86memop,
1367 PatFrag ld_frag, string OpcodeStr, Domain d> {
1368 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1369 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1370 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
1371 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1372 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1373 [(set EFLAGS, (OpNode (vt RC:$src1),
1374 (ld_frag addr:$src2)))], d>;
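// Note on the compares selected through this multiclass: both ucomis* and
// comis* set ZF/PF/CF from an ordered/unordered scalar compare; the
// architectural difference is that ucomis* raises the invalid exception only
// for signaling NaN operands, while comis* also raises it for quiet NaNs.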
1377 let Defs = [EFLAGS] in {
1378 let isAsmParserOnly = 1 in {
1379 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1380 "ucomiss", SSEPackedSingle>, VEX;
1381 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1382 "ucomisd", SSEPackedDouble>, OpSize, VEX;
1383 let Pattern = []<dag> in {
1384 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1385 "comiss", SSEPackedSingle>, VEX;
1386 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1387 "comisd", SSEPackedDouble>, OpSize, VEX;
1390 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1391 load, "ucomiss", SSEPackedSingle>, VEX;
1392 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1393 load, "ucomisd", SSEPackedDouble>, OpSize, VEX;
1395 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
1396 load, "comiss", SSEPackedSingle>, VEX;
1397 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
1398 load, "comisd", SSEPackedDouble>, OpSize, VEX;
1400 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1401 "ucomiss", SSEPackedSingle>, TB;
1402 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1403 "ucomisd", SSEPackedDouble>, TB, OpSize;
1405 let Pattern = []<dag> in {
1406 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1407 "comiss", SSEPackedSingle>, TB;
1408 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1409 "comisd", SSEPackedDouble>, TB, OpSize;
1412 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1413 load, "ucomiss", SSEPackedSingle>, TB;
1414 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1415 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
1417 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
1418 "comiss", SSEPackedSingle>, TB;
1419 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
1420 "comisd", SSEPackedDouble>, TB, OpSize;
1421 } // Defs = [EFLAGS]
1423 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
1424 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1425 Intrinsic Int, string asm, string asm_alt,
1427 def rri : PIi8<0xC2, MRMSrcReg,
1428 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1429 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1430 def rmi : PIi8<0xC2, MRMSrcMem,
1431 (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
1432 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1433 // Accept explicit immediate argument form instead of comparison code.
1434 let isAsmParserOnly = 1 in {
1435 def rri_alt : PIi8<0xC2, MRMSrcReg,
1436 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1438 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1439 (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
1444 let isAsmParserOnly = 1 in {
1445 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1446 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1447 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1448 SSEPackedSingle>, VEX_4V;
1449 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1450 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1451 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1452 SSEPackedDouble>, OpSize, VEX_4V;
1454 let Constraints = "$src1 = $dst" in {
1455 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1456 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1457 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1458 SSEPackedSingle>, TB;
1459 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1460 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1461 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1462 SSEPackedDouble>, TB, OpSize;
1465 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1466 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1467 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1468 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1469 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1470 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1471 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1472 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
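// Informational: these patterns select the X86cmpps/X86cmppd nodes (vector
// setcc lowered by the target) onto CMPPS/CMPPD; each result element is
// all-ones when the predicate holds and all-zeros otherwise, which is why the
// results are typed as the integer vectors v4i32/v2i64.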
1474 //===----------------------------------------------------------------------===//
1475 // SSE 1 & 2 - Shuffle Instructions
1476 //===----------------------------------------------------------------------===//
1478 /// sse12_shuffle - sse 1 & 2 shuffle instructions
1479 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1480 ValueType vt, string asm, PatFrag mem_frag,
1481 Domain d, bit IsConvertibleToThreeAddress = 0> {
1482 def rmi : PIi8<0xC6, MRMSrcMem, (outs VR128:$dst),
1483 (ins VR128:$src1, f128mem:$src2, i8imm:$src3), asm,
1484 [(set VR128:$dst, (vt (shufp:$src3
1485 VR128:$src1, (mem_frag addr:$src2))))], d>;
1486 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1487 def rri : PIi8<0xC6, MRMSrcReg, (outs VR128:$dst),
1488 (ins VR128:$src1, VR128:$src2, i8imm:$src3), asm,
1490 (vt (shufp:$src3 VR128:$src1, VR128:$src2)))], d>;
1493 let isAsmParserOnly = 1 in {
1494 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1495 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1496 memopv4f32, SSEPackedSingle>, VEX_4V;
1497 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1498 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1499 memopv2f64, SSEPackedDouble>, OpSize, VEX_4V;
1502 let Constraints = "$src1 = $dst" in {
1503 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1504 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1505 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1507 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1508 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1509 memopv2f64, SSEPackedDouble>, TB, OpSize;
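// Shuffle immediate reference (informational): for shufps the 8-bit immediate
// holds four 2-bit selectors; the low two pick result lanes 0-1 from $src1
// and the high two pick lanes 2-3 from $src2. shufpd uses only bits 0-1, one
// per result lane. E.g. "shufps $0x1b, %xmm0, %xmm0" reverses the four lanes
// of %xmm0, since 0x1b selects elements 3, 2, 1, 0.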
1512 //===----------------------------------------------------------------------===//
1513 // SSE 1 & 2 - Unpack Instructions
1514 //===----------------------------------------------------------------------===//
1516 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1517 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1518 PatFrag mem_frag, RegisterClass RC,
1519 X86MemOperand x86memop, string asm,
1521 def rr : PI<opc, MRMSrcReg,
1522 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1524 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1525 def rm : PI<opc, MRMSrcMem,
1526 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1528 (vt (OpNode RC:$src1,
1529 (mem_frag addr:$src2))))], d>;
1532 let AddedComplexity = 10 in {
1533 let isAsmParserOnly = 1 in {
1534 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1535 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1536 SSEPackedSingle>, VEX_4V;
1537 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1538 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1539 SSEPackedDouble>, OpSize, VEX_4V;
1540 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1541 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1542 SSEPackedSingle>, VEX_4V;
1543 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1544 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1545 SSEPackedDouble>, OpSize, VEX_4V;
1547 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1548 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1549 SSEPackedSingle>, VEX_4V;
1550 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1551 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1552 SSEPackedDouble>, OpSize, VEX_4V;
1553 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1554 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1555 SSEPackedSingle>, VEX_4V;
1556 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1557 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1558 SSEPackedDouble>, OpSize, VEX_4V;
1561 let Constraints = "$src1 = $dst" in {
1562 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1563 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1564 SSEPackedSingle>, TB;
1565 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1566 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1567 SSEPackedDouble>, TB, OpSize;
1568 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1569 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1570 SSEPackedSingle>, TB;
1571 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1572 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1573 SSEPackedDouble>, TB, OpSize;
1574 } // Constraints = "$src1 = $dst"
1575 } // AddedComplexity
1577 //===----------------------------------------------------------------------===//
1578 // SSE 1 & 2 - Extract Floating-Point Sign mask
1579 //===----------------------------------------------------------------------===//
1581 /// sse12_extr_sign_mask - sse 1 & 2 extract floating-point sign mask
1582 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1584 def rr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1585 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1586 [(set GR32:$dst, (Int RC:$src))], d>;
1590 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1591 SSEPackedSingle>, TB;
1592 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1593 SSEPackedDouble>, TB, OpSize;
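// Informational: movmskps copies the sign bit of each of the four
// single-precision lanes of $src into bits 3:0 of the GR32 destination and
// zeroes the remaining bits; movmskpd does the same for the two
// double-precision lanes into bits 1:0.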
1595 let isAsmParserOnly = 1 in {
1596 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1597 "movmskps", SSEPackedSingle>, VEX;
1598 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1599 "movmskpd", SSEPackedDouble>, OpSize,
1603 //===----------------------------------------------------------------------===//
1604 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1605 //===----------------------------------------------------------------------===//
1607 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1608 // names that start with 'Fs'.
1610 // Alias instructions that map fld0 to pxor for sse.
1611 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1612 canFoldAsLoad = 1 in {
1613 // FIXME: Set encoding to pseudo!
1614 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1615 [(set FR32:$dst, fp32imm0)]>,
1616 Requires<[HasSSE1]>, TB, OpSize;
1617 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1618 [(set FR64:$dst, fpimm0)]>,
1619 Requires<[HasSSE2]>, TB, OpSize;
1622 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
1623 // bits are disregarded.
1624 let neverHasSideEffects = 1 in {
1625 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1626 "movaps\t{$src, $dst|$dst, $src}", []>;
1627 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1628 "movapd\t{$src, $dst|$dst, $src}", []>;
1631 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1632 // bits are disregarded.
1633 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1634 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1635 "movaps\t{$src, $dst|$dst, $src}",
1636 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
1637 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1638 "movapd\t{$src, $dst|$dst, $src}",
1639 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1642 //===----------------------------------------------------------------------===//
1643 // SSE 1 & 2 - Logical Instructions
1644 //===----------------------------------------------------------------------===//
1646 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
1648 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
1649 SDNode OpNode, bit MayLoad = 0> {
1650 let isAsmParserOnly = 1 in {
1651 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1652 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode, FR32,
1653 f32, f128mem, memopfsf32, SSEPackedSingle, MayLoad>, VEX_4V;
1655 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1656 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode, FR64,
1657 f64, f128mem, memopfsf64, SSEPackedDouble, MayLoad>, OpSize,
1661 let Constraints = "$src1 = $dst" in {
1662 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1663 "ps\t{$src2, $dst|$dst, $src2}"), OpNode, FR32, f32,
1664 f128mem, memopfsf32, SSEPackedSingle, MayLoad>, TB;
1666 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1667 "pd\t{$src2, $dst|$dst, $src2}"), OpNode, FR64, f64,
1668 f128mem, memopfsf64, SSEPackedDouble, MayLoad>, TB, OpSize;
1672 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1673 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
1674 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
1675 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
1677 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
1678 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef, 1>;
1680 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
1682 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
1683 SDNode OpNode, int HasPat = 0,
1684 list<list<dag>> Pattern = []> {
1685 let isAsmParserOnly = 1 in {
1686 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1687 !strconcat(OpcodeStr, "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1689 !if(HasPat, Pattern[0], // rr
1690 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1692 !if(HasPat, Pattern[2], // rm
1693 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1694 (memopv2i64 addr:$src2)))])>,
1697 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1698 !strconcat(OpcodeStr, "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1700 !if(HasPat, Pattern[1], // rr
1701 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1704 !if(HasPat, Pattern[3], // rm
1705 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1706 (memopv2i64 addr:$src2)))])>,
1709 let Constraints = "$src1 = $dst" in {
1710 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1711 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"), f128mem,
1712 !if(HasPat, Pattern[0], // rr
1713 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1715 !if(HasPat, Pattern[2], // rm
1716 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1717 (memopv2i64 addr:$src2)))])>, TB;
1719 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1720 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"), f128mem,
1721 !if(HasPat, Pattern[1], // rr
1722 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1725 !if(HasPat, Pattern[3], // rm
1726 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1727 (memopv2i64 addr:$src2)))])>,
1732 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
1733 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
1734 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
1735 let isCommutable = 0 in
1736 defm ANDN : sse12_fp_packed_logical<0x55, "andn", undef /* dummy */, 1, [
1738 [(set VR128:$dst, (v2i64 (and (xor VR128:$src1,
1739 (bc_v2i64 (v4i32 immAllOnesV))),
1742 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1743 (bc_v2i64 (v2f64 VR128:$src2))))],
1745 [(set VR128:$dst, (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
1746 (bc_v2i64 (v4i32 immAllOnesV))),
1747 (memopv2i64 addr:$src2))))],
1749 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1750 (memopv2i64 addr:$src2)))]]>;
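// Informational: andnps/andnpd compute (NOT $src1) AND $src2. There is no
// dedicated DAG node for that operation here, so the explicit pattern lists
// above spell the register form as (and (xor src1, all-ones), src2) /
// (and (vnot src1), src2); this is why ANDN passes HasPat = 1 with its own
// patterns instead of an OpNode.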
1752 //===----------------------------------------------------------------------===//
1753 // SSE 1 & 2 - Arithmetic Instructions
1754 //===----------------------------------------------------------------------===//
1756 /// basic_sse12_fp_binop_rm - SSE 1 & 2 binops come in both scalar and vector forms.
1759 /// In addition, we also have a special variant of the scalar form here to
1760 /// represent the associated intrinsic operation. This form is unlike the
1761 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1762 /// and leaves the top elements unmodified (therefore these cannot be commuted).
1764 /// These three forms can each be reg+reg or reg+mem.
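/// For example, addss leaves elements 1-3 of the destination untouched and
/// only combines element 0 with the source, so swapping the operands of the
/// intrinsic form would change which vector supplies the pass-through lanes.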
1766 multiclass basic_sse12_fp_binop_rm<bits<8> opc, string OpcodeStr,
1769 let isAsmParserOnly = 1 in {
1770 defm V#NAME#SS : sse12_fp_scalar<opc,
1771 !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1772 OpNode, FR32, f32mem>, XS, VEX_4V;
1774 defm V#NAME#SD : sse12_fp_scalar<opc,
1775 !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1776 OpNode, FR64, f64mem>, XD, VEX_4V;
1778 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1779 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode,
1780 VR128, v4f32, f128mem, memopv4f32, SSEPackedSingle>,
1783 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1784 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode,
1785 VR128, v2f64, f128mem, memopv2f64, SSEPackedDouble>,
1788 defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1789 !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1790 "", "_ss", ssmem, sse_load_f32>, XS, VEX_4V;
1792 defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1793 !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1794 "2", "_sd", sdmem, sse_load_f64>, XD, VEX_4V;
1797 let Constraints = "$src1 = $dst" in {
1798 defm SS : sse12_fp_scalar<opc,
1799 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
1800 OpNode, FR32, f32mem>, XS;
1802 defm SD : sse12_fp_scalar<opc,
1803 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1804 OpNode, FR64, f64mem>, XD;
1806 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1807 "ps\t{$src2, $dst|$dst, $src2}"), OpNode, VR128, v4f32,
1808 f128mem, memopv4f32, SSEPackedSingle>, TB;
1810 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1811 "pd\t{$src2, $dst|$dst, $src2}"), OpNode, VR128, v2f64,
1812 f128mem, memopv2f64, SSEPackedDouble>, TB, OpSize;
1814 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1815 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
1816 "", "_ss", ssmem, sse_load_f32>, XS;
1818 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1819 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1820 "2", "_sd", sdmem, sse_load_f64>, XD;
1824 // Arithmetic instructions
1825 defm ADD : basic_sse12_fp_binop_rm<0x58, "add", fadd>;
1826 defm MUL : basic_sse12_fp_binop_rm<0x59, "mul", fmul>;
1828 let isCommutable = 0 in {
1829 defm SUB : basic_sse12_fp_binop_rm<0x5C, "sub", fsub>;
1830 defm DIV : basic_sse12_fp_binop_rm<0x5E, "div", fdiv>;
1833 /// sse12_fp_binop_rm - Other SSE 1 & 2 binops
1835 /// This multiclass is like basic_sse12_fp_binop_rm, with the addition of
1836 /// instructions for a full-vector intrinsic form. Operations that map
1837 /// onto C operators don't use this form since they just use the plain
1838 /// vector form instead of having a separate vector intrinsic form.
1840 multiclass sse12_fp_binop_rm<bits<8> opc, string OpcodeStr,
1843 let isAsmParserOnly = 1 in {
1844 // Scalar operation, reg+reg.
1845 defm V#NAME#SS : sse12_fp_scalar<opc,
1846 !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1847 OpNode, FR32, f32mem>, XS, VEX_4V;
1849 defm V#NAME#SD : sse12_fp_scalar<opc,
1850 !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1851 OpNode, FR64, f64mem>, XD, VEX_4V;
1853 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1854 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode,
1855 VR128, v4f32, f128mem, memopv4f32, SSEPackedSingle>,
1858 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1859 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode,
1860 VR128, v2f64, f128mem, memopv2f64, SSEPackedDouble>,
1863 defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1864 !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1865 "", "_ss", ssmem, sse_load_f32>, XS, VEX_4V;
1867 defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1868 !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1869 "2", "_sd", sdmem, sse_load_f64>, XD, VEX_4V;
1871 defm V#NAME#PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1872 !strconcat(OpcodeStr, "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1873 "", "_ps", f128mem, memopv4f32, SSEPackedSingle>, VEX_4V;
1875 defm V#NAME#PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1876 !strconcat(OpcodeStr, "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1877 "2", "_pd", f128mem, memopv2f64, SSEPackedDouble>, OpSize,
1881 let Constraints = "$src1 = $dst" in {
1882 // Scalar operation, reg+reg.
1883 defm SS : sse12_fp_scalar<opc,
1884 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
1885 OpNode, FR32, f32mem>, XS;
1886 defm SD : sse12_fp_scalar<opc,
1887 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1888 OpNode, FR64, f64mem>, XD;
1889 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1890 "ps\t{$src2, $dst|$dst, $src2}"), OpNode, VR128, v4f32,
1891 f128mem, memopv4f32, SSEPackedSingle>, TB;
1893 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1894 "pd\t{$src2, $dst|$dst, $src2}"), OpNode, VR128, v2f64,
1895 f128mem, memopv2f64, SSEPackedDouble>, TB, OpSize;
1897 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1898 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
1899 "", "_ss", ssmem, sse_load_f32>, XS;
1901 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1902 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1903 "2", "_sd", sdmem, sse_load_f64>, XD;
1905 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1906 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
1907 "", "_ps", f128mem, memopv4f32, SSEPackedSingle>, TB;
1909 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1910 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
1911 "2", "_pd", f128mem, memopv2f64, SSEPackedDouble>, TB, OpSize;
1915 let isCommutable = 0 in {
1916 defm MAX : sse12_fp_binop_rm<0x5F, "max", X86fmax>;
1917 defm MIN : sse12_fp_binop_rm<0x5D, "min", X86fmin>;
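// MAX/MIN are not commutable because maxss/minss are not symmetric in the
// presence of NaNs or signed zeros: when both operands are zero (of either
// sign) or either operand is a NaN, the second source operand is returned.
// The X86fmax/X86fmin nodes carry these instruction semantics rather than a
// mathematical max/min.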
1921 /// In addition, we also have a special variant of the scalar form here to
1922 /// represent the associated intrinsic operation. This form is unlike the
1923 /// plain scalar form, in that it takes an entire vector (instead of a
1924 /// scalar) and leaves the top elements undefined.
1926 /// In addition, we have a special variant for the full-vector intrinsic form.
1928 /// sse1_fp_unop_s - SSE1 unops in scalar form.
1929 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
1930 SDNode OpNode, Intrinsic F32Int> {
1931 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1932 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1933 [(set FR32:$dst, (OpNode FR32:$src))]>;
1934 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
1935 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1936 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
1937 Requires<[HasSSE1, OptForSize]>;
1938 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1939 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1940 [(set VR128:$dst, (F32Int VR128:$src))]>;
1941 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1942 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1943 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1946 /// sse1_fp_unop_p - SSE1 unops in vector forms.
1947 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr,
1948 SDNode OpNode, Intrinsic V4F32Int> {
1949 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1950 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1951 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
1952 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1953 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1954 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
1955 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1956 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1957 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
1958 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1959 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1960 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
1963 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
1964 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1965 SDNode OpNode, Intrinsic F32Int> {
1966 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
1967 !strconcat(!strconcat("v", OpcodeStr),
1968 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1969 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
1970 !strconcat(!strconcat("v", OpcodeStr),
1971 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1972 []>, XS, Requires<[HasAVX, HasSSE1, OptForSize]>;
1973 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
1974 (ins VR128:$src1, VR128:$src2),
1975 !strconcat(!strconcat("v", OpcodeStr),
1976 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1977 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
1978 (ins VR128:$src1, ssmem:$src2),
1979 !strconcat(!strconcat("v", OpcodeStr),
1980 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1983 /// sse2_fp_unop_s - SSE2 unops in scalar form.
1984 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
1985 SDNode OpNode, Intrinsic F64Int> {
1986 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1987 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1988 [(set FR64:$dst, (OpNode FR64:$src))]>;
1989 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1990 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1991 [(set FR64:$dst, (OpNode (load addr:$src)))]>;
1992 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1993 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1994 [(set VR128:$dst, (F64Int VR128:$src))]>;
1995 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1996 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1997 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
2000 /// sse2_fp_unop_p - SSE2 unops in vector forms.
2001 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
2002 SDNode OpNode, Intrinsic V2F64Int> {
2003 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2004 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2005 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
2006 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2007 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2008 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
2009 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2010 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2011 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
2012 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2013 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2014 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
2017 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
2018 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
2019 SDNode OpNode, Intrinsic F64Int> {
2020 def SDr : VSDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
2021 !strconcat(OpcodeStr,
2022 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2023 def SDm : VSDI<opc, MRMSrcMem, (outs FR64:$dst),
2024 (ins FR64:$src1, f64mem:$src2),
2025 !strconcat(OpcodeStr,
2026 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2027 def SDr_Int : VSDI<opc, MRMSrcReg, (outs VR128:$dst),
2028 (ins VR128:$src1, VR128:$src2),
2029 !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2031 def SDm_Int : VSDI<opc, MRMSrcMem, (outs VR128:$dst),
2032 (ins VR128:$src1, sdmem:$src2),
2033 !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2037 let isAsmParserOnly = 1 in {
2039 let Predicates = [HasAVX, HasSSE2] in {
2040 defm VSQRT : sse2_fp_unop_s_avx<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
2043 defm VSQRT : sse2_fp_unop_p<0x51, "vsqrt", fsqrt, int_x86_sse2_sqrt_pd>, VEX;
2046 let Predicates = [HasAVX, HasSSE1] in {
2047 defm VSQRT : sse1_fp_unop_s_avx<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
2049 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt, int_x86_sse_sqrt_ps>, VEX;
2050 // Reciprocal approximations. Note that these typically require refinement
2051 // in order to obtain suitable precision.
2052 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "rsqrt", X86frsqrt,
2053 int_x86_sse_rsqrt_ss>, VEX_4V;
2054 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt, int_x86_sse_rsqrt_ps>,
2056 defm VRCP : sse1_fp_unop_s_avx<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
2058 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp, int_x86_sse_rcp_ps>,
2064 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
2065 sse1_fp_unop_p<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ps>,
2066 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
2067 sse2_fp_unop_p<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_pd>;
2069 // Reciprocal approximations. Note that these typically require refinement
2070 // in order to obtain suitable precision.
2071 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
2072 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ps>;
2073 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
2074 sse1_fp_unop_p<0x53, "rcp", X86frcp, int_x86_sse_rcp_ps>;
2076 // There is no f64 version of the reciprocal approximation instructions.
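// Informational sketch of the refinement mentioned above: rcpps/rsqrtps give
// roughly 12 bits of precision, and one Newton-Raphson step recovers close to
// full single precision. For a reciprocal estimate x0 of a:
//   x1 = x0 * (2 - a * x0)
// For a reciprocal-square-root estimate x0 of a:
//   x1 = 0.5 * x0 * (3 - a * x0 * x0)
// Whether this refinement is emitted is decided elsewhere; it is shown here
// only to explain the comment above.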
2078 //===----------------------------------------------------------------------===//
2079 // SSE 1 & 2 - Non-temporal stores
2080 //===----------------------------------------------------------------------===//
2082 let isAsmParserOnly = 1 in {
2083 def VMOVNTPSmr_Int : VPSI<0x2B, MRMDestMem, (outs),
2084 (ins i128mem:$dst, VR128:$src),
2085 "movntps\t{$src, $dst|$dst, $src}",
2086 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>, VEX;
2087 def VMOVNTPDmr_Int : VPDI<0x2B, MRMDestMem, (outs),
2088 (ins i128mem:$dst, VR128:$src),
2089 "movntpd\t{$src, $dst|$dst, $src}",
2090 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>, VEX;
2092 let ExeDomain = SSEPackedInt in
2093 def VMOVNTDQmr_Int : VPDI<0xE7, MRMDestMem, (outs),
2094 (ins f128mem:$dst, VR128:$src),
2095 "movntdq\t{$src, $dst|$dst, $src}",
2096 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>, VEX;
2098 let AddedComplexity = 400 in { // Prefer non-temporal versions
2099 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
2100 (ins f128mem:$dst, VR128:$src),
2101 "movntps\t{$src, $dst|$dst, $src}",
2102 [(alignednontemporalstore (v4f32 VR128:$src),
2104 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
2105 (ins f128mem:$dst, VR128:$src),
2106 "movntpd\t{$src, $dst|$dst, $src}",
2107 [(alignednontemporalstore (v2f64 VR128:$src),
2109 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
2110 (ins f128mem:$dst, VR128:$src),
2111 "movntdq\t{$src, $dst|$dst, $src}",
2112 [(alignednontemporalstore (v2f64 VR128:$src),
2114 let ExeDomain = SSEPackedInt in
2115 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
2116 (ins f128mem:$dst, VR128:$src),
2117 "movntdq\t{$src, $dst|$dst, $src}",
2118 [(alignednontemporalstore (v4f32 VR128:$src),
2121 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
2122 (ins f256mem:$dst, VR256:$src),
2123 "movntps\t{$src, $dst|$dst, $src}",
2124 [(alignednontemporalstore (v8f32 VR256:$src),
2126 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
2127 (ins f256mem:$dst, VR256:$src),
2128 "movntpd\t{$src, $dst|$dst, $src}",
2129 [(alignednontemporalstore (v4f64 VR256:$src),
2131 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
2132 (ins f256mem:$dst, VR256:$src),
2133 "movntdq\t{$src, $dst|$dst, $src}",
2134 [(alignednontemporalstore (v4f64 VR256:$src),
2136 let ExeDomain = SSEPackedInt in
2137 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
2138 (ins f256mem:$dst, VR256:$src),
2139 "movntdq\t{$src, $dst|$dst, $src}",
2140 [(alignednontemporalstore (v8f32 VR256:$src),
2145 def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2146 "movntps\t{$src, $dst|$dst, $src}",
2147 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
2148 def MOVNTPDmr_Int : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2149 "movntpd\t{$src, $dst|$dst, $src}",
2150 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
2152 let ExeDomain = SSEPackedInt in
2153 def MOVNTDQmr_Int : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2154 "movntdq\t{$src, $dst|$dst, $src}",
2155 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
2157 let AddedComplexity = 400 in { // Prefer non-temporal versions
2158 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2159 "movntps\t{$src, $dst|$dst, $src}",
2160 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2161 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2162 "movntpd\t{$src, $dst|$dst, $src}",
2163 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
2165 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2166 "movntdq\t{$src, $dst|$dst, $src}",
2167 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2169 let ExeDomain = SSEPackedInt in
2170 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2171 "movntdq\t{$src, $dst|$dst, $src}",
2172 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
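// Informational: AddedComplexity = 400 makes these nontemporal-store patterns
// win over the ordinary aligned-store patterns that would otherwise also
// match; 'alignednontemporalstore' only matches stores carrying the
// nontemporal hint, so normal stores still select movaps/movdqa.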
2174 // There is no AVX form for instructions below this point
2175 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2176 "movnti\t{$src, $dst|$dst, $src}",
2177 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2178 TB, Requires<[HasSSE2]>;
2180 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2181 "movnti\t{$src, $dst|$dst, $src}",
2182 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2183 TB, Requires<[HasSSE2]>;
2186 def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2187 "movnti\t{$src, $dst|$dst, $src}",
2188 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
2189 TB, Requires<[HasSSE2]>;
2191 //===----------------------------------------------------------------------===//
2192 // SSE 1 & 2 - Misc Instructions (No AVX form)
2193 //===----------------------------------------------------------------------===//
2195 // Prefetch intrinsic.
2196 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2197 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
2198 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2199 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
2200 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2201 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
2202 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2203 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
2205 // Load, store, and memory fence
2206 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2207 TB, Requires<[HasSSE1]>;
2209 // Alias instructions that map zero vector to pxor / xorp* for sse.
2210 // We set canFoldAsLoad because this can be converted to a constant-pool
2211 // load of an all-zeros value if folding it would be beneficial.
2212 // FIXME: Change encoding to pseudo!
2213 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2214 isCodeGenOnly = 1 in {
2215 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2216 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
2217 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2218 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
2219 let ExeDomain = SSEPackedInt in
2220 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2221 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2224 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
2225 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
2226 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
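// Informational: xorps/pxor of a register with itself is a recognized zeroing
// idiom on most x86 implementations (no real dependency on the source), so
// materializing an all-zeros vector this way is smaller and cheaper than a
// constant-pool load; canFoldAsLoad still allows folding to a load when that
// is profitable.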
2228 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2229 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
2231 //===----------------------------------------------------------------------===//
2232 // SSE 1 & 2 - Load/Store XCSR register
2233 //===----------------------------------------------------------------------===//
2235 let isAsmParserOnly = 1 in {
2236 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2237 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2238 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2239 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2242 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2243 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2244 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2245 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2247 //===---------------------------------------------------------------------===//
2248 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2249 //===---------------------------------------------------------------------===//
2250 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2252 let isAsmParserOnly = 1 in {
2253 let neverHasSideEffects = 1 in
2254 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2255 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2256 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2257 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2259 let canFoldAsLoad = 1, mayLoad = 1 in {
2260 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2261 "movdqa\t{$src, $dst|$dst, $src}",
2262 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>,
2264 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2265 "vmovdqu\t{$src, $dst|$dst, $src}",
2266 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2267 XS, VEX, Requires<[HasAVX, HasSSE2]>;
2270 let mayStore = 1 in {
2271 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2272 (ins i128mem:$dst, VR128:$src),
2273 "movdqa\t{$src, $dst|$dst, $src}",
2274 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>, VEX;
2275 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2276 "vmovdqu\t{$src, $dst|$dst, $src}",
2277 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2278 XS, VEX, Requires<[HasAVX, HasSSE2]>;
2282 let neverHasSideEffects = 1 in
2283 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2284 "movdqa\t{$src, $dst|$dst, $src}", []>;
2286 let canFoldAsLoad = 1, mayLoad = 1 in {
2287 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2288 "movdqa\t{$src, $dst|$dst, $src}",
2289 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2290 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2291 "movdqu\t{$src, $dst|$dst, $src}",
2292 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2293 XS, Requires<[HasSSE2]>;
2296 let mayStore = 1 in {
2297 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2298 "movdqa\t{$src, $dst|$dst, $src}",
2299 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2300 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2301 "movdqu\t{$src, $dst|$dst, $src}",
2302 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2303 XS, Requires<[HasSSE2]>;
2306 // Intrinsic forms of MOVDQU load and store
2307 let isAsmParserOnly = 1 in {
2308 let canFoldAsLoad = 1 in
2309 def VMOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2310 "vmovdqu\t{$src, $dst|$dst, $src}",
2311 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2312 XS, VEX, Requires<[HasAVX, HasSSE2]>;
2313 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2314 "vmovdqu\t{$src, $dst|$dst, $src}",
2315 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2316 XS, VEX, Requires<[HasAVX, HasSSE2]>;
2319 let canFoldAsLoad = 1 in
2320 def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2321 "movdqu\t{$src, $dst|$dst, $src}",
2322 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2323 XS, Requires<[HasSSE2]>;
2324 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2325 "movdqu\t{$src, $dst|$dst, $src}",
2326 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2327 XS, Requires<[HasSSE2]>;
2329 } // ExeDomain = SSEPackedInt
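// Informational: movdqa faults if its memory operand is not 16-byte aligned,
// while movdqu accepts any alignment. The load/store patterns above are
// commented out, so within this section only the intrinsic forms carry
// selection patterns.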
2331 //===---------------------------------------------------------------------===//
2332 // SSE2 - Packed Integer Arithmetic Instructions
2333 //===---------------------------------------------------------------------===//
2335 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2337 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
2338 bit IsCommutable = 0, bit Is2Addr = 1> {
2339 let isCommutable = IsCommutable in
2340 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2341 (ins VR128:$src1, VR128:$src2),
2343 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2344 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2345 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2346 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2347 (ins VR128:$src1, i128mem:$src2),
2349 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2350 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2351 [(set VR128:$dst, (IntId VR128:$src1,
2352 (bitconvert (memopv2i64 addr:$src2))))]>;
2355 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
2356 string OpcodeStr, Intrinsic IntId,
2357 Intrinsic IntId2, bit Is2Addr = 1> {
2358 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2359 (ins VR128:$src1, VR128:$src2),
2361 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2362 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2363 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2364 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2365 (ins VR128:$src1, i128mem:$src2),
2367 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2368 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2369 [(set VR128:$dst, (IntId VR128:$src1,
2370 (bitconvert (memopv2i64 addr:$src2))))]>;
2371 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
2372 (ins VR128:$src1, i32i8imm:$src2),
2374 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2375 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2376 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
2379 /// PDI_binop_rm - Simple SSE2 binary operator.
2380 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2381 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
2382 let isCommutable = IsCommutable in
2383 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2384 (ins VR128:$src1, VR128:$src2),
2386 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2387 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2388 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
2389 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2390 (ins VR128:$src1, i128mem:$src2),
2392 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2393 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2394 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
2395 (bitconvert (memopv2i64 addr:$src2)))))]>;
2398 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
2400 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
2401 /// to collapse (bitconvert VT to VT) into its operand.
2403 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
2404 bit IsCommutable = 0, bit Is2Addr = 1> {
2405 let isCommutable = IsCommutable in
2406 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2407 (ins VR128:$src1, VR128:$src2),
2409 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2410 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2411 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
2412 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2413 (ins VR128:$src1, i128mem:$src2),
2415 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2416 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2417 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
2420 } // ExeDomain = SSEPackedInt
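// Informational: PDI_binop_rm wraps its memory operand in
// (bitconvert (memopv2i64 ...)) because 128-bit integer loads are canonically
// typed v2i64; for v2i64 operations that bitconvert would be an identity,
// which is the only reason PDI_binop_rm_v2i64 exists (see the FIXME above).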
2422 // 128-bit Integer Arithmetic
2424 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2425 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
2426 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
2427 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
2428 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
2429 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
2430 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
2431 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
2432 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
2433 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
2436 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
2438 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
2440 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
2442 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
2444 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
2446 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
2448 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
2450 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
2452 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
2454 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
2456 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
2458 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
2460 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
2462 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
2464 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
2466 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
2468 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
2470 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
2472 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
2476 let Constraints = "$src1 = $dst" in {
2477 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
2478 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
2479 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
2480 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
2481 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
2482 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
2483 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
2484 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
2485 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
2488 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
2489 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
2490 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
2491 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
2492 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
2493 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
2494 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
2495 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
2496 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
2497 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
2498 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
2499 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
2500 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
2501 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
2502 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
2503 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
2504 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
2505 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
2506 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
2508 } // Constraints = "$src1 = $dst"
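// Informational: the saturating forms above clamp rather than wrap (e.g.
// paddsb clamps signed byte results to [-128, 127], paddusb clamps unsigned
// results to [0, 255]), so they are exposed through intrinsics instead of the
// plain add/sub nodes used for PADDB/PSUBB and friends.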
2510 //===---------------------------------------------------------------------===//
2511 // SSE2 - Packed Integer Logical Instructions
2512 //===---------------------------------------------------------------------===//
2514 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2515 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
2516 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
2518 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
2519 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
2521 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
2522 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
2525 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
2526 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
2528 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
2529 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
2531 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
2532 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
2535 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
2536 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
2538 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
2539 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
2542 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
2543 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
2544 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
2546 let ExeDomain = SSEPackedInt in {
2547 let neverHasSideEffects = 1 in {
2548 // 128-bit logical shifts.
2549 def VPSLLDQri : PDIi8<0x73, MRM7r,
2550 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2551 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2553 def VPSRLDQri : PDIi8<0x73, MRM3r,
2554 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2555 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2557 // PSRADQri doesn't exist in SSE[1-3].
2559 def VPANDNrr : PDI<0xDF, MRMSrcReg,
2560 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2561 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2562 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2563 VR128:$src2)))]>, VEX_4V;
2565 def VPANDNrm : PDI<0xDF, MRMSrcMem,
2566 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2567 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2568 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2569 (memopv2i64 addr:$src2))))]>,
2574 let Constraints = "$src1 = $dst" in {
2575 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
2576 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
2577 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
2578 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
2579 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
2580 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
2582 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
2583 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
2584 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
2585 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
2586 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
2587 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
2589 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
2590 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
2591 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
2592 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
2594 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
2595 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
2596 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
2598 let ExeDomain = SSEPackedInt in {
2599 let neverHasSideEffects = 1 in {
2600 // 128-bit logical shifts.
2601 def PSLLDQri : PDIi8<0x73, MRM7r,
2602 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2603 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
2604 def PSRLDQri : PDIi8<0x73, MRM3r,
2605 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2606 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
2607 // PSRADQri doesn't exist in SSE[1-3].
2609 def PANDNrr : PDI<0xDF, MRMSrcReg,
2610 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2611 "pandn\t{$src2, $dst|$dst, $src2}",
2612 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2615 def PANDNrm : PDI<0xDF, MRMSrcMem,
2616 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2617 "pandn\t{$src2, $dst|$dst, $src2}",
2618 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2619 (memopv2i64 addr:$src2))))]>;
2621 } // Constraints = "$src1 = $dst"
2623 let Predicates = [HasSSE2] in {
2624 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2625 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2626 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2627 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2628 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2629 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
2630 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2631 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
2632 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2633 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2635 // Shift up / down and insert zeros.
2636 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2637 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2638 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2639 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2642 //===---------------------------------------------------------------------===//
2643 // SSE2 - Packed Integer Comparison Instructions
2644 //===---------------------------------------------------------------------===//
2646 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2647 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
2649 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
2651 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
2653 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
2655 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
2657 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
2661 let Constraints = "$src1 = $dst" in {
2662 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
2663 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
2664 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
2665 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
2666 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
2667 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
2668 } // Constraints = "$src1 = $dst"
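// Lower the target-specific integer compare nodes (typically produced when
// vsetcc is lowered) onto the SSE2 compare instructions defined above.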
2670 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2671 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
2672 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2673 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2674 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2675 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2676 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2677 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2678 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2679 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2680 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2681 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2683 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2684 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2685 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2686 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2687 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2688 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2689 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2690 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2691 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2692 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2693 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2694 (PCMPGTDrm VR128:$src1, addr:$src2)>;
2696 //===---------------------------------------------------------------------===//
2697 // SSE2 - Packed Integer Pack Instructions
2698 //===---------------------------------------------------------------------===//
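// packsswb / packssdw narrow each source element to half its width with
// signed saturation; packuswb narrows with unsigned saturation.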
2700 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2701 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
2703 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
2705 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
2709 let Constraints = "$src1 = $dst" in {
2710 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2711 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2712 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2713 } // Constraints = "$src1 = $dst"
2715 //===---------------------------------------------------------------------===//
2716 // SSE2 - Packed Integer Shuffle Instructions
2717 //===---------------------------------------------------------------------===//
2719 let ExeDomain = SSEPackedInt in {
multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
                         PatFrag bc_frag> {
2722 def ri : Ii8<0x70, MRMSrcReg,
2723 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2724 !strconcat(OpcodeStr,
2725 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
                                        (undef))))]>;
2728 def mi : Ii8<0x70, MRMSrcMem,
2729 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2730 !strconcat(OpcodeStr,
2731 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2732 [(set VR128:$dst, (vt (pshuf_frag:$src2
(bc_frag (memopv2i64 addr:$src1)),
                     (undef))))]>;
}
2736 } // ExeDomain = SSEPackedInt
2738 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2739 let AddedComplexity = 5 in
2740 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
2743 // SSE2 with ImmT == Imm8 and XS prefix.
2744 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
2747 // SSE2 with ImmT == Imm8 and XD prefix.
2748 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
2752 let Predicates = [HasSSE2] in {
2753 let AddedComplexity = 5 in
2754 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
2756 // SSE2 with ImmT == Imm8 and XS prefix.
2757 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
2759 // SSE2 with ImmT == Imm8 and XD prefix.
2760 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
2763 //===---------------------------------------------------------------------===//
2764 // SSE2 - Packed Integer Unpack Instructions
2765 //===---------------------------------------------------------------------===//
2767 let ExeDomain = SSEPackedInt in {
2768 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
2769 PatFrag unp_frag, PatFrag bc_frag, bit Is2Addr = 1> {
2770 def rr : PDI<opc, MRMSrcReg,
2771 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
!if(Is2Addr,
    !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2774 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2775 [(set VR128:$dst, (vt (unp_frag VR128:$src1, VR128:$src2)))]>;
2776 def rm : PDI<opc, MRMSrcMem,
2777 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
!if(Is2Addr,
    !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2780 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2781 [(set VR128:$dst, (unp_frag VR128:$src1,
(bc_frag (memopv2i64
                     addr:$src2))))]>;
}
2786 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2787 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, unpckl, bc_v16i8,
2789 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, unpckl, bc_v8i16,
2791 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, unpckl, bc_v4i32,
2794 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2795 /// knew to collapse (bitconvert VT to VT) into its operand.
2796 def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2797 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2798 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2800 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>, VEX_4V;
2801 def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2802 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2803 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2805 (v2i64 (unpckl VR128:$src1,
2806 (memopv2i64 addr:$src2))))]>, VEX_4V;
2808 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, unpckh, bc_v16i8,
2810 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, unpckh, bc_v8i16,
2812 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, unpckh, bc_v4i32,
2815 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2816 /// knew to collapse (bitconvert VT to VT) into its operand.
2817 def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2818 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2819 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2821 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>, VEX_4V;
2822 def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2823 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2824 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2826 (v2i64 (unpckh VR128:$src1,
2827 (memopv2i64 addr:$src2))))]>, VEX_4V;
2830 let Constraints = "$src1 = $dst" in {
2831 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, unpckl, bc_v16i8>;
2832 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, unpckl, bc_v8i16>;
2833 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, unpckl, bc_v4i32>;
2835 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2836 /// knew to collapse (bitconvert VT to VT) into its operand.
2837 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2838 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2839 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2841 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>;
2842 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2843 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2844 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2846 (v2i64 (unpckl VR128:$src1,
2847 (memopv2i64 addr:$src2))))]>;
2849 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, unpckh, bc_v16i8>;
2850 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, unpckh, bc_v8i16>;
2851 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, unpckh, bc_v4i32>;
2853 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2854 /// knew to collapse (bitconvert VT to VT) into its operand.
2855 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2856 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2857 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2859 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>;
2860 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2861 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2862 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2864 (v2i64 (unpckh VR128:$src1,
2865 (memopv2i64 addr:$src2))))]>;
2868 } // ExeDomain = SSEPackedInt
2870 //===---------------------------------------------------------------------===//
2871 // SSE2 - Packed Integer Extract and Insert
2872 //===---------------------------------------------------------------------===//
2874 let ExeDomain = SSEPackedInt in {
2875 multiclass sse2_pinsrw<bit Is2Addr = 1> {
2876 def rri : Ii8<0xC4, MRMSrcReg,
2877 (outs VR128:$dst), (ins VR128:$src1,
2878 GR32:$src2, i32i8imm:$src3),
!if(Is2Addr,
    "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2881 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2883 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
2884 def rmi : Ii8<0xC4, MRMSrcMem,
2885 (outs VR128:$dst), (ins VR128:$src1,
2886 i16mem:$src2, i32i8imm:$src3),
!if(Is2Addr,
    "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2889 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2891 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2896 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in
2897 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
2898 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2899 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2900 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2901 imm:$src2))]>, OpSize, VEX;
2902 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2903 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2904 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2905 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2909 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in
defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
2912 let Constraints = "$src1 = $dst" in
defm PINSRW : sse2_pinsrw, TB, OpSize;
2915 } // ExeDomain = SSEPackedInt
2917 //===---------------------------------------------------------------------===//
2918 // SSE2 - Packed Mask Creation
2919 //===---------------------------------------------------------------------===//
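// pmovmskb collects the most significant bit of each byte of the source
// vector into the low 16 bits of a general-purpose register.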
2921 let ExeDomain = SSEPackedInt in {
2923 let isAsmParserOnly = 1 in
2924 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2925 "pmovmskb\t{$src, $dst|$dst, $src}",
2926 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
2927 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2928 "pmovmskb\t{$src, $dst|$dst, $src}",
2929 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2931 } // ExeDomain = SSEPackedInt
2933 //===---------------------------------------------------------------------===//
2934 // SSE2 - Conditional Store
2935 //===---------------------------------------------------------------------===//
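// maskmovdqu performs a byte-granular masked store of $src to [EDI] (or
// [RDI] in 64-bit mode); a byte is written only when the sign bit of the
// corresponding mask byte is set.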
2937 let ExeDomain = SSEPackedInt in {
2939 let isAsmParserOnly = 1 in {
2941 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
2942 (ins VR128:$src, VR128:$mask),
2943 "maskmovdqu\t{$mask, $src|$src, $mask}",
2944 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
2946 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
2947 (ins VR128:$src, VR128:$mask),
2948 "maskmovdqu\t{$mask, $src|$src, $mask}",
2949 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
2953 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2954 "maskmovdqu\t{$mask, $src|$src, $mask}",
2955 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
2957 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2958 "maskmovdqu\t{$mask, $src|$src, $mask}",
2959 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
2961 } // ExeDomain = SSEPackedInt
2963 //===---------------------------------------------------------------------===//
2964 // SSE2 - Move Doubleword
2965 //===---------------------------------------------------------------------===//
2967 // Move Int Doubleword to Packed Double Int
2968 let isAsmParserOnly = 1 in {
2969 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2970 "movd\t{$src, $dst|$dst, $src}",
2972 (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
2973 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2974 "movd\t{$src, $dst|$dst, $src}",
2976 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
2979 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2980 "movd\t{$src, $dst|$dst, $src}",
2982 (v4i32 (scalar_to_vector GR32:$src)))]>;
2983 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2984 "movd\t{$src, $dst|$dst, $src}",
2986 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2989 // Move Int Doubleword to Single Scalar
2990 let isAsmParserOnly = 1 in {
2991 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2992 "movd\t{$src, $dst|$dst, $src}",
2993 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
2995 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2996 "movd\t{$src, $dst|$dst, $src}",
2997 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
3000 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
3001 "movd\t{$src, $dst|$dst, $src}",
3002 [(set FR32:$dst, (bitconvert GR32:$src))]>;
3004 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
3005 "movd\t{$src, $dst|$dst, $src}",
3006 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
3008 // Move Packed Doubleword Int to Packed Double Int
3009 let isAsmParserOnly = 1 in {
3010 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
3011 "movd\t{$src, $dst|$dst, $src}",
3012 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
3014 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
3015 (ins i32mem:$dst, VR128:$src),
3016 "movd\t{$src, $dst|$dst, $src}",
3017 [(store (i32 (vector_extract (v4i32 VR128:$src),
3018 (iPTR 0))), addr:$dst)]>, VEX;
3020 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
3021 "movd\t{$src, $dst|$dst, $src}",
3022 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
3024 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
3025 "movd\t{$src, $dst|$dst, $src}",
3026 [(store (i32 (vector_extract (v4i32 VR128:$src),
3027 (iPTR 0))), addr:$dst)]>;
3029 // Move Scalar Single to Double Int
3030 let isAsmParserOnly = 1 in {
3031 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3032 "movd\t{$src, $dst|$dst, $src}",
3033 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
3034 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3035 "movd\t{$src, $dst|$dst, $src}",
3036 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
3038 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3039 "movd\t{$src, $dst|$dst, $src}",
3040 [(set GR32:$dst, (bitconvert FR32:$src))]>;
3041 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3042 "movd\t{$src, $dst|$dst, $src}",
3043 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
3045 // movd / movq to XMM register zero-extends
3046 let AddedComplexity = 15, isAsmParserOnly = 1 in {
3047 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3048 "movd\t{$src, $dst|$dst, $src}",
3049 [(set VR128:$dst, (v4i32 (X86vzmovl
3050 (v4i32 (scalar_to_vector GR32:$src)))))]>,
3052 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3053 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3054 [(set VR128:$dst, (v2i64 (X86vzmovl
3055 (v2i64 (scalar_to_vector GR64:$src)))))]>,
3058 let AddedComplexity = 15 in {
3059 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3060 "movd\t{$src, $dst|$dst, $src}",
3061 [(set VR128:$dst, (v4i32 (X86vzmovl
3062 (v4i32 (scalar_to_vector GR32:$src)))))]>;
3063 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3064 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3065 [(set VR128:$dst, (v2i64 (X86vzmovl
3066 (v2i64 (scalar_to_vector GR64:$src)))))]>;
3069 let AddedComplexity = 20 in {
3070 let isAsmParserOnly = 1 in
3071 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3072 "movd\t{$src, $dst|$dst, $src}",
3074 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3075 (loadi32 addr:$src))))))]>,
3077 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3078 "movd\t{$src, $dst|$dst, $src}",
3080 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3081 (loadi32 addr:$src))))))]>;
3083 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
3084 (MOVZDI2PDIrm addr:$src)>;
3085 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3086 (MOVZDI2PDIrm addr:$src)>;
3087 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3088 (MOVZDI2PDIrm addr:$src)>;
3091 //===---------------------------------------------------------------------===//
3092 // SSE2 - Move Quadword
3093 //===---------------------------------------------------------------------===//
3095 // Move Quadword Int to Packed Quadword Int
3096 let isAsmParserOnly = 1 in
3097 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3098 "vmovq\t{$src, $dst|$dst, $src}",
3100 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3101 VEX, Requires<[HasAVX, HasSSE2]>;
3102 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3103 "movq\t{$src, $dst|$dst, $src}",
3105 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3106 Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
3108 // Move Packed Quadword Int to Quadword Int
3109 let isAsmParserOnly = 1 in
3110 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3111 "movq\t{$src, $dst|$dst, $src}",
3112 [(store (i64 (vector_extract (v2i64 VR128:$src),
3113 (iPTR 0))), addr:$dst)]>, VEX;
3114 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3115 "movq\t{$src, $dst|$dst, $src}",
3116 [(store (i64 (vector_extract (v2i64 VR128:$src),
3117 (iPTR 0))), addr:$dst)]>;
3119 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
3120 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
// Store / copy the lower 64 bits of an XMM register.
3123 let isAsmParserOnly = 1 in
3124 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3125 "movq\t{$src, $dst|$dst, $src}",
3126 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
3127 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3128 "movq\t{$src, $dst|$dst, $src}",
3129 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
3131 let AddedComplexity = 20, isAsmParserOnly = 1 in
3132 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3133 "vmovq\t{$src, $dst|$dst, $src}",
3135 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3136 (loadi64 addr:$src))))))]>,
3137 XS, VEX, Requires<[HasAVX, HasSSE2]>;
3139 let AddedComplexity = 20 in {
3140 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3141 "movq\t{$src, $dst|$dst, $src}",
3143 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3144 (loadi64 addr:$src))))))]>,
3145 XS, Requires<[HasSSE2]>;
3147 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3148 (MOVZQI2PQIrm addr:$src)>;
3149 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
3150 (MOVZQI2PQIrm addr:$src)>;
3151 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
// Move from XMM to XMM, clearing the upper 64 bits. Note: contrary to the
// IA-32 documentation, movq xmm1, xmm2 does clear the high bits.
3156 let isAsmParserOnly = 1, AddedComplexity = 15 in
3157 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3158 "vmovq\t{$src, $dst|$dst, $src}",
3159 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3160 XS, VEX, Requires<[HasAVX, HasSSE2]>;
3161 let AddedComplexity = 15 in
3162 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3163 "movq\t{$src, $dst|$dst, $src}",
3164 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3165 XS, Requires<[HasSSE2]>;
3167 let AddedComplexity = 20, isAsmParserOnly = 1 in
3168 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3169 "vmovq\t{$src, $dst|$dst, $src}",
3170 [(set VR128:$dst, (v2i64 (X86vzmovl
3171 (loadv2i64 addr:$src))))]>,
3172 XS, VEX, Requires<[HasAVX, HasSSE2]>;
3173 let AddedComplexity = 20 in {
3174 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3175 "movq\t{$src, $dst|$dst, $src}",
3176 [(set VR128:$dst, (v2i64 (X86vzmovl
3177 (loadv2i64 addr:$src))))]>,
3178 XS, Requires<[HasSSE2]>;
3180 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
3181 (MOVZPQILo2PQIrm addr:$src)>;
3184 // Instructions to match in the assembler
3185 let isAsmParserOnly = 1 in {
// These instructions are in fact aliases to movd with a 64-bit operand.
3187 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3188 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3189 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3190 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3193 // Instructions for the disassembler
3194 // xr = XMM register
3197 let isAsmParserOnly = 1 in
3198 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3199 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3200 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3201 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3203 //===---------------------------------------------------------------------===//
3204 // SSE2 - Misc Instructions
3205 //===---------------------------------------------------------------------===//
3208 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3209 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3210 TB, Requires<[HasSSE2]>;
3212 // Load, store, and memory fence
3213 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3214 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3215 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3216 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3218 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3219 // was introduced with SSE2, it's backward compatible.
3220 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
// TODO: custom lower this so as to never even generate the noop.
def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm),
                      (i8 0)), (NOOP)>;
3225 def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
3226 def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm),
                      (i8 1)), (MFENCE)>;
// Alias instruction that maps the all-ones vector to pcmpeqd for SSE.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-ones value if folding it would be beneficial.
3233 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3234 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3235 // FIXME: Change encoding to pseudo.
3236 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3237 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
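// For example, materializing the all-ones constant <-1, -1, -1, -1> with
// this instruction ends up as "pcmpeqd %xmm0, %xmm0", which sets every bit
// of the destination register.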
3239 //===---------------------------------------------------------------------===//
3240 // SSE3 - Conversion Instructions
3241 //===---------------------------------------------------------------------===//
3243 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in {
3244 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3245 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3246 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3247 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3248 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3249 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3252 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3253 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3254 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3255 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3256 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3257 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3258 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3259 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3261 //===---------------------------------------------------------------------===//
3262 // SSE3 - Move Instructions
3263 //===---------------------------------------------------------------------===//
3265 // Replicate Single FP
3266 multiclass sse3_replicate_sfp<bits<8> op, PatFrag rep_frag, string OpcodeStr> {
3267 def rr : S3SI<op, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3268 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3269 [(set VR128:$dst, (v4f32 (rep_frag
3270 VR128:$src, (undef))))]>;
3271 def rm : S3SI<op, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3272 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3273 [(set VR128:$dst, (rep_frag
3274 (memopv4f32 addr:$src), (undef)))]>;
3277 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in {
3278 defm VMOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "vmovshdup">, VEX;
3279 defm VMOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "vmovsldup">, VEX;
3281 defm MOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "movshdup">;
3282 defm MOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "movsldup">;
3284 // Replicate Double FP
3285 multiclass sse3_replicate_dfp<string OpcodeStr> {
3286 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3287 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3288 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
3289 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
3290 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3292 (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
3296 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in
3297 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
3298 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
3300 // Move Unaligned Integer
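// lddqu is an unaligned 128-bit integer load; on some implementations it is
// faster than movdqu for loads that cross a cache-line boundary.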
3301 let isAsmParserOnly = 1 in
3302 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3303 "vlddqu\t{$src, $dst|$dst, $src}",
3304 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
3305 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3306 "lddqu\t{$src, $dst|$dst, $src}",
3307 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
3309 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
3311 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3313 // Several Move patterns
3314 let AddedComplexity = 5 in {
3315 def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
3316 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3317 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3318 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3319 def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
3320 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3321 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
3322 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3325 // vector_shuffle v1, <undef> <1, 1, 3, 3>
3326 let AddedComplexity = 15 in
3327 def : Pat<(v4i32 (movshdup VR128:$src, (undef))),
3328 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3329 let AddedComplexity = 20 in
3330 def : Pat<(v4i32 (movshdup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3331 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
3333 // vector_shuffle v1, <undef> <0, 0, 2, 2>
3334 let AddedComplexity = 15 in
3335 def : Pat<(v4i32 (movsldup VR128:$src, (undef))),
3336 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3337 let AddedComplexity = 20 in
3338 def : Pat<(v4i32 (movsldup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3339 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
3341 //===---------------------------------------------------------------------===//
3342 // SSE3 - Arithmetic
3343 //===---------------------------------------------------------------------===//
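// addsubps / addsubpd subtract in the even lanes and add in the odd lanes,
// e.g. addsubps produces { a0-b0, a1+b1, a2-b2, a3+b3 }.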
3345 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, bit Is2Addr = 1> {
3346 def rr : I<0xD0, MRMSrcReg,
3347 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
!if(Is2Addr,
    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3350 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3351 [(set VR128:$dst, (Int VR128:$src1,
3353 def rm : I<0xD0, MRMSrcMem,
3354 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
!if(Is2Addr,
    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3357 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3358 [(set VR128:$dst, (Int VR128:$src1,
3359 (memop addr:$src2)))]>;
3363 let isAsmParserOnly = 1, Predicates = [HasSSE3, HasAVX],
3364 ExeDomain = SSEPackedDouble in {
3365 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", 0>, XD,
3367 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", 0>, OpSize,
3370 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
3371 ExeDomain = SSEPackedDouble in {
3372 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps">, XD;
3373 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd">, TB, OpSize;
3376 //===---------------------------------------------------------------------===//
3377 // SSE3 Instructions
3378 //===---------------------------------------------------------------------===//
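// Horizontal add / subtract. haddps computes pairwise sums of adjacent
// elements from each source, e.g. { a0+a1, a2+a3, b0+b1, b2+b3 }.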
3381 class S3D_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1>
3382 : S3DI<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
!if(Is2Addr,
    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3385 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3386 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
3387 class S3D_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1>
3388 : S3DI<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
!if(Is2Addr,
    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3391 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3392 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, (memop addr:$src2))))]>;
3393 class S3_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1>
3394 : S3I<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
!if(Is2Addr,
    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3397 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3398 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
3399 class S3_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1>
3400 : S3I<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
!if(Is2Addr,
    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3403 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3404 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, (memopv2f64 addr:$src2))))]>;
3406 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in {
3407 def VHADDPSrr : S3D_Intrr<0x7C, "vhaddps", int_x86_sse3_hadd_ps, 0>, VEX_4V;
3408 def VHADDPSrm : S3D_Intrm<0x7C, "vhaddps", int_x86_sse3_hadd_ps, 0>, VEX_4V;
3409 def VHADDPDrr : S3_Intrr <0x7C, "vhaddpd", int_x86_sse3_hadd_pd, 0>, VEX_4V;
3410 def VHADDPDrm : S3_Intrm <0x7C, "vhaddpd", int_x86_sse3_hadd_pd, 0>, VEX_4V;
3411 def VHSUBPSrr : S3D_Intrr<0x7D, "vhsubps", int_x86_sse3_hsub_ps, 0>, VEX_4V;
3412 def VHSUBPSrm : S3D_Intrm<0x7D, "vhsubps", int_x86_sse3_hsub_ps, 0>, VEX_4V;
3413 def VHSUBPDrr : S3_Intrr <0x7D, "vhsubpd", int_x86_sse3_hsub_pd, 0>, VEX_4V;
3414 def VHSUBPDrm : S3_Intrm <0x7D, "vhsubpd", int_x86_sse3_hsub_pd, 0>, VEX_4V;
3417 let Constraints = "$src1 = $dst" in {
3418 def HADDPSrr : S3D_Intrr<0x7C, "haddps", int_x86_sse3_hadd_ps>;
3419 def HADDPSrm : S3D_Intrm<0x7C, "haddps", int_x86_sse3_hadd_ps>;
3420 def HADDPDrr : S3_Intrr <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
3421 def HADDPDrm : S3_Intrm <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
3422 def HSUBPSrr : S3D_Intrr<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
3423 def HSUBPSrm : S3D_Intrm<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
3424 def HSUBPDrr : S3_Intrr <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
3425 def HSUBPDrm : S3_Intrm <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
3428 //===---------------------------------------------------------------------===//
3429 // SSSE3 - Packed Absolute Instructions
3430 //===---------------------------------------------------------------------===//
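// pabs{b,w,d} compute the per-element absolute value, e.g. pabsw maps
// <-5, 3, -1, ...> to <5, 3, 1, ...>.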
3432 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
3433 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
3434 PatFrag mem_frag64, PatFrag mem_frag128,
3435 Intrinsic IntId64, Intrinsic IntId128> {
3436 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
3437 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3438 [(set VR64:$dst, (IntId64 VR64:$src))]>;
3440 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
3441 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3443 (IntId64 (bitconvert (mem_frag64 addr:$src))))]>;
3445 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3447 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3448 [(set VR128:$dst, (IntId128 VR128:$src))]>,
3451 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3453 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3456 (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
3459 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in {
3460 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv8i8, memopv16i8,
3461 int_x86_ssse3_pabs_b,
3462 int_x86_ssse3_pabs_b_128>, VEX;
3463 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv4i16, memopv8i16,
3464 int_x86_ssse3_pabs_w,
3465 int_x86_ssse3_pabs_w_128>, VEX;
3466 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv2i32, memopv4i32,
3467 int_x86_ssse3_pabs_d,
3468 int_x86_ssse3_pabs_d_128>, VEX;
3471 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv8i8, memopv16i8,
3472 int_x86_ssse3_pabs_b,
3473 int_x86_ssse3_pabs_b_128>;
3474 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv4i16, memopv8i16,
3475 int_x86_ssse3_pabs_w,
3476 int_x86_ssse3_pabs_w_128>;
3477 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv2i32, memopv4i32,
3478 int_x86_ssse3_pabs_d,
3479 int_x86_ssse3_pabs_d_128>;
3481 //===---------------------------------------------------------------------===//
3482 // SSSE3 - Packed Binary Operator Instructions
3483 //===---------------------------------------------------------------------===//
3485 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
3486 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
3487 PatFrag mem_frag64, PatFrag mem_frag128,
3488 Intrinsic IntId64, Intrinsic IntId128,
3490 let isCommutable = 1 in
3491 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
3492 (ins VR64:$src1, VR64:$src2),
!if(Is2Addr,
    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3495 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3496 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]>;
3497 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
3498 (ins VR64:$src1, i64mem:$src2),
!if(Is2Addr,
    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3501 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3503 (IntId64 VR64:$src1,
3504 (bitconvert (memopv8i8 addr:$src2))))]>;
3506 let isCommutable = 1 in
3507 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3508 (ins VR128:$src1, VR128:$src2),
!if(Is2Addr,
    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3511 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3512 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3514 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3515 (ins VR128:$src1, i128mem:$src2),
!if(Is2Addr,
    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3518 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3520 (IntId128 VR128:$src1,
3521 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
3524 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in {
3525 let isCommutable = 0 in {
3526 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv4i16, memopv8i16,
3527 int_x86_ssse3_phadd_w,
3528 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
3529 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv2i32, memopv4i32,
3530 int_x86_ssse3_phadd_d,
3531 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
3532 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv4i16, memopv8i16,
3533 int_x86_ssse3_phadd_sw,
3534 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
3535 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv4i16, memopv8i16,
3536 int_x86_ssse3_phsub_w,
3537 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
3538 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv2i32, memopv4i32,
3539 int_x86_ssse3_phsub_d,
3540 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
3541 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv4i16, memopv8i16,
3542 int_x86_ssse3_phsub_sw,
3543 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
3544 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv8i8, memopv16i8,
3545 int_x86_ssse3_pmadd_ub_sw,
3546 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
3547 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv8i8, memopv16i8,
3548 int_x86_ssse3_pshuf_b,
3549 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
3550 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv8i8, memopv16i8,
3551 int_x86_ssse3_psign_b,
3552 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
3553 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv4i16, memopv8i16,
3554 int_x86_ssse3_psign_w,
3555 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
3556 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv2i32, memopv4i32,
3557 int_x86_ssse3_psign_d,
3558 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
3560 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv4i16, memopv8i16,
3561 int_x86_ssse3_pmul_hr_sw,
3562 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
3565 // None of these have i8 immediate fields.
3566 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
3567 let isCommutable = 0 in {
3568 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv4i16, memopv8i16,
3569 int_x86_ssse3_phadd_w,
3570 int_x86_ssse3_phadd_w_128>;
3571 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv2i32, memopv4i32,
3572 int_x86_ssse3_phadd_d,
3573 int_x86_ssse3_phadd_d_128>;
3574 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv4i16, memopv8i16,
3575 int_x86_ssse3_phadd_sw,
3576 int_x86_ssse3_phadd_sw_128>;
3577 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv4i16, memopv8i16,
3578 int_x86_ssse3_phsub_w,
3579 int_x86_ssse3_phsub_w_128>;
3580 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv2i32, memopv4i32,
3581 int_x86_ssse3_phsub_d,
3582 int_x86_ssse3_phsub_d_128>;
3583 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv4i16, memopv8i16,
3584 int_x86_ssse3_phsub_sw,
3585 int_x86_ssse3_phsub_sw_128>;
3586 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv8i8, memopv16i8,
3587 int_x86_ssse3_pmadd_ub_sw,
3588 int_x86_ssse3_pmadd_ub_sw_128>;
3589 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv8i8, memopv16i8,
3590 int_x86_ssse3_pshuf_b,
3591 int_x86_ssse3_pshuf_b_128>;
3592 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv8i8, memopv16i8,
3593 int_x86_ssse3_psign_b,
3594 int_x86_ssse3_psign_b_128>;
3595 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv4i16, memopv8i16,
3596 int_x86_ssse3_psign_w,
3597 int_x86_ssse3_psign_w_128>;
3598 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv2i32, memopv4i32,
3599 int_x86_ssse3_psign_d,
3600 int_x86_ssse3_psign_d_128>;
3602 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv4i16, memopv8i16,
3603 int_x86_ssse3_pmul_hr_sw,
3604 int_x86_ssse3_pmul_hr_sw_128>;
3607 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
3608 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
3609 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
3610 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
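// pshufb semantics: each destination byte is taken from the source byte
// selected by the low four bits of the corresponding mask byte, or zeroed
// if the mask byte's most significant bit is set.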
3612 //===---------------------------------------------------------------------===//
3613 // SSSE3 - Packed Align Instruction Patterns
3614 //===---------------------------------------------------------------------===//
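// palignr concatenates its two sources, shifts the double-width value right
// by the immediate number of bytes, and keeps the low half.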
3616 multiclass sse3_palign<string asm, bit Is2Addr = 1> {
3617 def R64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
3618 (ins VR64:$src1, VR64:$src2, i8imm:$src3),
3620 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3622 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3624 def R64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
3625 (ins VR64:$src1, i64mem:$src2, i8imm:$src3),
3627 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3629 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3632 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
3633 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
3635 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3637 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3639 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
3640 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
3642 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3644 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3648 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in
3649 defm VPALIGN : sse3_palign<"vpalignr", 0>, VEX_4V;
3650 let Constraints = "$src1 = $dst" in
3651 defm PALIGN : sse3_palign<"palignr">;
3653 let AddedComplexity = 5 in {
3655 def : Pat<(v1i64 (palign:$src3 VR64:$src1, VR64:$src2)),
3656 (PALIGNR64rr VR64:$src2, VR64:$src1,
3657 (SHUFFLE_get_palign_imm VR64:$src3))>,
3658 Requires<[HasSSSE3]>;
3659 def : Pat<(v2i32 (palign:$src3 VR64:$src1, VR64:$src2)),
3660 (PALIGNR64rr VR64:$src2, VR64:$src1,
3661 (SHUFFLE_get_palign_imm VR64:$src3))>,
3662 Requires<[HasSSSE3]>;
3663 def : Pat<(v4i16 (palign:$src3 VR64:$src1, VR64:$src2)),
3664 (PALIGNR64rr VR64:$src2, VR64:$src1,
3665 (SHUFFLE_get_palign_imm VR64:$src3))>,
3666 Requires<[HasSSSE3]>;
3667 def : Pat<(v8i8 (palign:$src3 VR64:$src1, VR64:$src2)),
3668 (PALIGNR64rr VR64:$src2, VR64:$src1,
3669 (SHUFFLE_get_palign_imm VR64:$src3))>,
3670 Requires<[HasSSSE3]>;
3672 def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
3673 (PALIGNR128rr VR128:$src2, VR128:$src1,
3674 (SHUFFLE_get_palign_imm VR128:$src3))>,
3675 Requires<[HasSSSE3]>;
3676 def : Pat<(v4f32 (palign:$src3 VR128:$src1, VR128:$src2)),
3677 (PALIGNR128rr VR128:$src2, VR128:$src1,
3678 (SHUFFLE_get_palign_imm VR128:$src3))>,
3679 Requires<[HasSSSE3]>;
3680 def : Pat<(v8i16 (palign:$src3 VR128:$src1, VR128:$src2)),
3681 (PALIGNR128rr VR128:$src2, VR128:$src1,
3682 (SHUFFLE_get_palign_imm VR128:$src3))>,
3683 Requires<[HasSSSE3]>;
3684 def : Pat<(v16i8 (palign:$src3 VR128:$src1, VR128:$src2)),
3685 (PALIGNR128rr VR128:$src2, VR128:$src1,
3686 (SHUFFLE_get_palign_imm VR128:$src3))>,
3687 Requires<[HasSSSE3]>;
3690 //===---------------------------------------------------------------------===//
3691 // SSSE3 Misc Instructions
3692 //===---------------------------------------------------------------------===//
3694 // Thread synchronization
3695 def MONITOR : I<0x01, MRM_C8, (outs), (ins), "monitor",
3696 [(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
3697 def MWAIT : I<0x01, MRM_C9, (outs), (ins), "mwait",
3698 [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
3700 //===---------------------------------------------------------------------===//
3701 // Non-Instruction Patterns
3702 //===---------------------------------------------------------------------===//
// extload f32 -> f64.  This matches load+fextend because we have a hack in
// the isel (PreprocessForFPConvert) that can introduce loads after dag
// combine.
// Since these loads aren't folded into the fextend, we have to match it
// explicitly here.
3709 let Predicates = [HasSSE2] in
3710 def : Pat<(fextend (loadf32 addr:$src)),
3711 (CVTSS2SDrm addr:$src)>;
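// Bitconverts between 128-bit vector types are free: the value stays in the
// same XMM register, so these patterns select no instruction.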
3714 let Predicates = [HasSSE2] in {
3715 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
3716 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
3717 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
3718 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
3719 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
3720 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
3721 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
3722 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
3723 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
3724 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
3725 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
3726 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
3727 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
3728 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
3729 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
3730 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
3731 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
3732 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
3733 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
3734 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
3735 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
3736 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
3737 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
3738 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
3739 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
3740 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
3741 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
3742 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
3743 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
3744 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
3747 // Move scalar to XMM zero-extended
3748 // movd to XMM register zero-extends
3749 let AddedComplexity = 15 in {
// Zero a VR128, then do a MOVS{S|D} to the lower bits.
3751 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
3752 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
3753 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
3754 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
3755 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
3756 (MOVSSrr (v4f32 (V_SET0PS)),
3757 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
3758 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
3759 (MOVSSrr (v4i32 (V_SET0PI)),
3760 (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
3763 // Splat v2f64 / v2i64
3764 let AddedComplexity = 10 in {
3765 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
3766 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3767 def : Pat<(unpckh (v2f64 VR128:$src), (undef)),
3768 (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3769 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
3770 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3771 def : Pat<(unpckh (v2i64 VR128:$src), (undef)),
3772 (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3775 // Special unary SHUFPSrri case.
3776 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
3777 (SHUFPSrri VR128:$src1, VR128:$src1,
3778 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3779 let AddedComplexity = 5 in
3780 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3781 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3782 Requires<[HasSSE2]>;
3783 // Special unary SHUFPDrri case.
3784 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
3785 (SHUFPDrri VR128:$src1, VR128:$src1,
3786 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3787 Requires<[HasSSE2]>;
3788 // Special unary SHUFPDrri case.
3789 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
3790 (SHUFPDrri VR128:$src1, VR128:$src1,
3791 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3792 Requires<[HasSSE2]>;
3793 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3794 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3795 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3796 Requires<[HasSSE2]>;
3798 // Special binary v4i32 shuffle cases with SHUFPS.
3799 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
3800 (SHUFPSrri VR128:$src1, VR128:$src2,
3801 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3802 Requires<[HasSSE2]>;
3803 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)))),
3804 (SHUFPSrmi VR128:$src1, addr:$src2,
3805 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3806 Requires<[HasSSE2]>;
3807 // Special binary v2i64 shuffle cases using SHUFPDrri.
3808 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
3809 (SHUFPDrri VR128:$src1, VR128:$src2,
3810 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3811 Requires<[HasSSE2]>;
3813 // vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
3814 let AddedComplexity = 15 in {
3815 def : Pat<(v4i32 (unpckl_undef:$src2 VR128:$src, (undef))),
3816 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3817 Requires<[OptForSpeed, HasSSE2]>;
3818 def : Pat<(v4f32 (unpckl_undef:$src2 VR128:$src, (undef))),
3819 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3820 Requires<[OptForSpeed, HasSSE2]>;
3822 let AddedComplexity = 10 in {
3823 def : Pat<(v4f32 (unpckl_undef VR128:$src, (undef))),
3824 (UNPCKLPSrr VR128:$src, VR128:$src)>;
3825 def : Pat<(v16i8 (unpckl_undef VR128:$src, (undef))),
3826 (PUNPCKLBWrr VR128:$src, VR128:$src)>;
3827 def : Pat<(v8i16 (unpckl_undef VR128:$src, (undef))),
3828 (PUNPCKLWDrr VR128:$src, VR128:$src)>;
3829 def : Pat<(v4i32 (unpckl_undef VR128:$src, (undef))),
3830 (PUNPCKLDQrr VR128:$src, VR128:$src)>;
3833 // vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
3834 let AddedComplexity = 15 in {
3835 def : Pat<(v4i32 (unpckh_undef:$src2 VR128:$src, (undef))),
3836 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3837 Requires<[OptForSpeed, HasSSE2]>;
3838 def : Pat<(v4f32 (unpckh_undef:$src2 VR128:$src, (undef))),
3839 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3840 Requires<[OptForSpeed, HasSSE2]>;
3842 let AddedComplexity = 10 in {
3843 def : Pat<(v4f32 (unpckh_undef VR128:$src, (undef))),
3844 (UNPCKHPSrr VR128:$src, VR128:$src)>;
3845 def : Pat<(v16i8 (unpckh_undef VR128:$src, (undef))),
3846 (PUNPCKHBWrr VR128:$src, VR128:$src)>;
3847 def : Pat<(v8i16 (unpckh_undef VR128:$src, (undef))),
3848 (PUNPCKHWDrr VR128:$src, VR128:$src)>;
3849 def : Pat<(v4i32 (unpckh_undef VR128:$src, (undef))),
3850 (PUNPCKHDQrr VR128:$src, VR128:$src)>;
3853 let AddedComplexity = 20 in {
3854 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
3855 def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
3856 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
3858 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
3859 def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
3860 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
3862 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
3863 def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
3864 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3865 def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
3866 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3869 let AddedComplexity = 20 in {
3870 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
3871 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
3872 (MOVLPSrm VR128:$src1, addr:$src2)>;
3873 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
3874 (MOVLPDrm VR128:$src1, addr:$src2)>;
3875 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
3876 (MOVLPSrm VR128:$src1, addr:$src2)>;
3877 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
3878 (MOVLPDrm VR128:$src1, addr:$src2)>;
3881 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
3882 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3883 (MOVLPSmr addr:$src1, VR128:$src2)>;
3884 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3885 (MOVLPDmr addr:$src1, VR128:$src2)>;
3886 def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
3888 (MOVLPSmr addr:$src1, VR128:$src2)>;
3889 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3890 (MOVLPDmr addr:$src1, VR128:$src2)>;
3892 let AddedComplexity = 15 in {
3893 // Setting the lowest element in the vector.
3894 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
3895 (MOVSSrr (v4i32 VR128:$src1),
3896 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
3897 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
3898 (MOVSDrr (v2i64 VR128:$src1),
3899 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
3901 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
3902 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
3903 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3904 Requires<[HasSSE2]>;
3905 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
3906 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3907 Requires<[HasSSE2]>;
3910 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
3911 // fall back to this for SSE1)
3912 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
3913 (SHUFPSrri VR128:$src2, VR128:$src1,
3914 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3916 // Set lowest element and zero upper elements.
3917 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
3918 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
3920 // Some special case pandn patterns.
3921 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3923 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3924 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3926 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3927 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3929 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3931 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3932 (memop addr:$src2))),
3933 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3934 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3935 (memop addr:$src2))),
3936 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3937 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3938 (memop addr:$src2))),
3939 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3941 // vector -> vector casts
3942 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
3943 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
3944 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
3945 (Int_CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
3946 def : Pat<(v2f64 (sint_to_fp (v2i32 VR64:$src))),
3947 (Int_CVTPI2PDrr VR64:$src)>, Requires<[HasSSE2]>;
3948 def : Pat<(v2i32 (fp_to_sint (v2f64 VR128:$src))),
3949 (Int_CVTTPD2PIrr VR128:$src)>, Requires<[HasSSE2]>;
3951 // Use movaps / movups for SSE integer load / store (one byte shorter).
3952 def : Pat<(alignedloadv4i32 addr:$src),
3953 (MOVAPSrm addr:$src)>;
3954 def : Pat<(loadv4i32 addr:$src),
3955 (MOVUPSrm addr:$src)>;
3956 def : Pat<(alignedloadv2i64 addr:$src),
3957 (MOVAPSrm addr:$src)>;
3958 def : Pat<(loadv2i64 addr:$src),
3959 (MOVUPSrm addr:$src)>;
3961 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3962 (MOVAPSmr addr:$dst, VR128:$src)>;
3963 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3964 (MOVAPSmr addr:$dst, VR128:$src)>;
3965 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3966 (MOVAPSmr addr:$dst, VR128:$src)>;
3967 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3968 (MOVAPSmr addr:$dst, VR128:$src)>;
3969 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3970 (MOVUPSmr addr:$dst, VR128:$src)>;
3971 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3972 (MOVUPSmr addr:$dst, VR128:$src)>;
3973 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3974 (MOVUPSmr addr:$dst, VR128:$src)>;
3975 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3976 (MOVUPSmr addr:$dst, VR128:$src)>;
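// Note on "one byte shorter": the movaps/movups opcodes carry no mandatory
// prefix, while the integer moves do (movdqa needs a 66 prefix, movdqu an F3
// prefix), so selecting the FP moves for integer vectors saves one byte per
// load/store.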
3978 //===----------------------------------------------------------------------===//
3979 // SSE4.1 - Packed Move with Sign/Zero Extend
3980 //===----------------------------------------------------------------------===//
3982 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3983 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3984 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3985 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3987 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3988 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3990 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
3994 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
3995 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
3997 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
3999 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
4001 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
4003 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
4005 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
4009 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
4010 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
4011 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
4012 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
4013 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
4014 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
4016 // Common patterns involving scalar load.
4017 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
4018 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4019 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
4020 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4022 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
4023 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4024 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
4025 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4027 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
4028 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4029 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
4030 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4032 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
4033 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4034 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
4035 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4037 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
4038 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4039 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
4040 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4042 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
4043 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4044 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
4045 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
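// Illustrative C usage (sketch; variable names are examples only): these
// instructions back the SSE4.1 widening conversions from <smmintrin.h>:
//   __m128i sw = _mm_cvtepi8_epi16(v);  // pmovsxbw: sign-extend low 8 bytes to words
//   __m128i zw = _mm_cvtepu8_epi16(v);  // pmovzxbw: zero-extend low 8 bytes to words
// The scalar-load patterns above let a single 64-bit load feed these forms
// directly from memory.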
4048 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4049 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4050 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4051 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4053 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4054 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4056 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
4060 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4061 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
4063 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
4065 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
4067 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
4071 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
4072 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
4073 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
4074 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
4076 // Common patterns involving scalar load
4077 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
4078 (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
4079 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
4080 (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
4082 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
4083 (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
4084 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
4085 (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
4088 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4089 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4090 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4091 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4093 // Expecting an i16 load any-extended to an i32 value.
4094 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
4095 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4096 [(set VR128:$dst, (IntId (bitconvert
4097 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
4101 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4102 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
4104 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
4107 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
4108 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
4110 // Common patterns involving scalar load
4111 def : Pat<(int_x86_sse41_pmovsxbq
4112 (bitconvert (v4i32 (X86vzmovl
4113 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4114 (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
4116 def : Pat<(int_x86_sse41_pmovzxbq
4117 (bitconvert (v4i32 (X86vzmovl
4118 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4119 (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
4121 //===----------------------------------------------------------------------===//
4122 // SSE4.1 - Extract Instructions
4123 //===----------------------------------------------------------------------===//
4125 /// SS41I_extract8 - SSE 4.1 extract 8 bits to a 32-bit reg or 8-bit mem
4126 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
4127 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4128 (ins VR128:$src1, i32i8imm:$src2),
4129 !strconcat(OpcodeStr,
4130 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4131 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
4133 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4134 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
4135 !strconcat(OpcodeStr,
4136 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4139 // There's an AssertZext in the way of writing the store pattern
4140 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4143 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4144 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
4146 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
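// Illustrative C usage (sketch; variable names are examples only): pextrb
// backs _mm_extract_epi8 from <smmintrin.h>:
//   int b = _mm_extract_epi8(v, 3);  // byte 3 of v, zero-extended into a GPR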
4149 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
4150 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
4151 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4152 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
4153 !strconcat(OpcodeStr,
4154 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4157 // There's an AssertZext in the way of writing the store pattern
4158 // (store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))), addr:$dst)
4161 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4162 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
4164 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
4167 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4168 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
4169 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4170 (ins VR128:$src1, i32i8imm:$src2),
4171 !strconcat(OpcodeStr,
4172 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4174 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
4175 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4176 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
4177 !strconcat(OpcodeStr,
4178 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4179 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
4180 addr:$dst)]>, OpSize;
4183 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4184 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
4186 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
4188 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
4189 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
4190 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
4191 (ins VR128:$src1, i32i8imm:$src2),
4192 !strconcat(OpcodeStr,
4193 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4195 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
4196 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4197 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
4198 !strconcat(OpcodeStr,
4199 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4200 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
4201 addr:$dst)]>, OpSize, REX_W;
4204 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4205 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
4207 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
4209 /// SS41I_extractf32 - SSE 4.1 extract a 32-bit fp value to int reg or memory
4211 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
4212 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4213 (ins VR128:$src1, i32i8imm:$src2),
4214 !strconcat(OpcodeStr,
4215 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4217 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
4219 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4220 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
4221 !strconcat(OpcodeStr,
4222 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4223 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
4224 addr:$dst)]>, OpSize;
4227 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4228 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
4229 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
4231 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
4232 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
4235 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
4236 Requires<[HasSSE41]>;
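// Illustrative C usage (sketch; variable names are examples only): extractps
// backs _mm_extract_ps from <smmintrin.h>, which returns the selected lane's
// bit pattern as an int:
//   int bits = _mm_extract_ps(v, 2);        // lane 2 of v as raw bits
//   float f; memcpy(&f, &bits, sizeof f);   // reinterpret as float (<string.h>)
// A store of the lane as f32, as matched above, maps to the mr form.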
4238 //===----------------------------------------------------------------------===//
4239 // SSE4.1 - Insert Instructions
4240 //===----------------------------------------------------------------------===//
4242 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
4243 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4244 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4246 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4248 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4250 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
4251 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4252 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
4254 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4256 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4258 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
4259 imm:$src3))]>, OpSize;
4262 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4263 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
4264 let Constraints = "$src1 = $dst" in
4265 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
4267 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
4268 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4269 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4271 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4273 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4275 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
4277 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4278 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
4280 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4282 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4284 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
4285 imm:$src3)))]>, OpSize;
4288 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4289 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
4290 let Constraints = "$src1 = $dst" in
4291 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
4293 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
4294 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4295 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4297 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4299 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4301 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
4303 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4304 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
4306 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4308 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4310 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
4311 imm:$src3)))]>, OpSize;
4314 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4315 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
4316 let Constraints = "$src1 = $dst" in
4317 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
4319 // insertps has a few different modes. The first two below are optimized
4320 // inserts that won't zero arbitrary elements in the destination vector; the
4321 // one that matches the intrinsic can zero arbitrary elements in the target
4322 // vector.
4323 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
4324 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4325 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4327 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4329 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4331 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
4333 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4334 (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
4336 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4338 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4340 (X86insrtps VR128:$src1,
4341 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
4342 imm:$src3))]>, OpSize;
4345 let Constraints = "$src1 = $dst" in
4346 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
4347 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4348 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
4350 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4351 (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>;
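// Illustrative C usage (sketch; variable names are examples only): insertps
// backs _mm_insert_ps from <smmintrin.h>. In the immediate, bits [7:6] pick
// the source lane of $src2, bits [5:4] the destination lane in $src1, and
// bits [3:0] are a zero mask:
//   __m128 r = _mm_insert_ps(a, b, 0x10);  // copy lane 0 of b into lane 1 of a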
4353 //===----------------------------------------------------------------------===//
4354 // SSE4.1 - Round Instructions
4355 //===----------------------------------------------------------------------===//
4357 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd,
4360 Intrinsic V2F64Int> {
4362 // Vector intrinsic operation, reg
4363 def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
4364 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4365 !strconcat(OpcodeStr,
4366 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4367 [(set VR128:$dst, (V4F32Int VR128:$src1, imm:$src2))]>,
4370 // Vector intrinsic operation, mem
4371 def PSm_Int : Ii8<opcps, MRMSrcMem,
4372 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
4373 !strconcat(OpcodeStr,
4374 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4376 (V4F32Int (memopv4f32 addr:$src1),imm:$src2))]>,
4378 Requires<[HasSSE41]>;
4380 // Vector intrinsic operation, reg
4381 def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
4382 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4383 !strconcat(OpcodeStr,
4384 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4385 [(set VR128:$dst, (V2F64Int VR128:$src1, imm:$src2))]>,
4388 // Vector intrinsic operation, mem
4389 def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
4390 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
4391 !strconcat(OpcodeStr,
4392 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4394 (V2F64Int (memopv2f64 addr:$src1),imm:$src2))]>,
4398 multiclass sse41_fp_unop_rm_avx<bits<8> opcps, bits<8> opcpd,
4401 // Vector intrinsic operation, reg
4402 def PSr : SS4AIi8<opcps, MRMSrcReg,
4403 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4404 !strconcat(OpcodeStr,
4405 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4408 // Vector intrinsic operation, mem
4409 def PSm : Ii8<opcps, MRMSrcMem,
4410 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
4411 !strconcat(OpcodeStr,
4412 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4413 []>, TA, OpSize, Requires<[HasSSE41]>;
4415 // Vector intrinsic operation, reg
4416 def PDr : SS4AIi8<opcpd, MRMSrcReg,
4417 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4418 !strconcat(OpcodeStr,
4419 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4422 // Vector intrinsic operation, mem
4423 def PDm : SS4AIi8<opcpd, MRMSrcMem,
4424 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
4425 !strconcat(OpcodeStr,
4426 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4430 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4433 Intrinsic F64Int, bit Is2Addr = 1> {
4434 // Intrinsic operation, reg.
4435 def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
4436 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4438 !strconcat(OpcodeStr,
4439 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4440 !strconcat(OpcodeStr,
4441 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4442 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4445 // Intrinsic operation, mem.
4446 def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
4447 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4449 !strconcat(OpcodeStr,
4450 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4451 !strconcat(OpcodeStr,
4452 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4454 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
4457 // Intrinsic operation, reg.
4458 def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
4459 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4461 !strconcat(OpcodeStr,
4462 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4463 !strconcat(OpcodeStr,
4464 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4465 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4468 // Intrinsic operation, mem.
4469 def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
4470 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4472 !strconcat(OpcodeStr,
4473 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4474 !strconcat(OpcodeStr,
4475 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4477 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
4481 multiclass sse41_fp_binop_rm_avx<bits<8> opcss, bits<8> opcsd,
4483 // Intrinsic operation, reg.
4484 def SSr : SS4AIi8<opcss, MRMSrcReg,
4485 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4486 !strconcat(OpcodeStr,
4487 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4490 // Intrinsic operation, mem.
4491 def SSm : SS4AIi8<opcss, MRMSrcMem,
4492 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4493 !strconcat(OpcodeStr,
4494 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4497 // Intrinsic operation, reg.
4498 def SDr : SS4AIi8<opcsd, MRMSrcReg,
4499 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4500 !strconcat(OpcodeStr,
4501 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4504 // Intrinsic operation, mem.
4505 def SDm : SS4AIi8<opcsd, MRMSrcMem,
4506 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4507 !strconcat(OpcodeStr,
4508 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4512 // FP round - roundss, roundps, roundsd, roundpd
4513 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4515 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround",
4516 int_x86_sse41_round_ps, int_x86_sse41_round_pd>,
4518 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
4519 int_x86_sse41_round_ss, int_x86_sse41_round_sd,
4521 // Instructions for the assembler
4522 defm VROUND : sse41_fp_unop_rm_avx<0x08, 0x09, "vround">, VEX;
4523 defm VROUND : sse41_fp_binop_rm_avx<0x0A, 0x0B, "vround">, VEX_4V;
4526 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round",
4527 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
4528 let Constraints = "$src1 = $dst" in
4529 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
4530 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
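// Illustrative C usage (sketch; variable names are examples only): these back
// the SSE4.1 rounding intrinsics from <smmintrin.h>:
//   __m128 f = _mm_floor_ps(x);                           // roundps, _MM_FROUND_FLOOR
//   __m128 n = _mm_round_ps(x, _MM_FROUND_TO_NEAREST_INT
//                              | _MM_FROUND_NO_EXC);      // roundps, round-to-nearest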
4532 //===----------------------------------------------------------------------===//
4533 // SSE4.1 - Misc Instructions
4534 //===----------------------------------------------------------------------===//
4536 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
4537 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
4538 Intrinsic IntId128> {
4539 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4541 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4542 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
4543 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4545 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4548 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
4551 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4552 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
4553 int_x86_sse41_phminposuw>, VEX;
4554 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
4555 int_x86_sse41_phminposuw>;
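// Illustrative C usage (sketch; variable names are examples only): phminposuw
// backs _mm_minpos_epu16 from <smmintrin.h>; the result holds the minimum u16
// in element 0 and its index in element 1:
//   __m128i r   = _mm_minpos_epu16(v);
//   int     min = _mm_extract_epi16(r, 0);
//   int     idx = _mm_extract_epi16(r, 1);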
4557 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
4558 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
4559 Intrinsic IntId128, bit Is2Addr = 1> {
4560 let isCommutable = 1 in
4561 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4562 (ins VR128:$src1, VR128:$src2),
4564 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4565 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4566 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
4567 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4568 (ins VR128:$src1, i128mem:$src2),
4570 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4571 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4573 (IntId128 VR128:$src1,
4574 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4577 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4578 let isCommutable = 0 in
4579 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
4581 defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
4583 defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
4585 defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
4587 defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
4589 defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
4591 defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
4593 defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
4595 defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
4597 defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
4599 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
4603 let Constraints = "$src1 = $dst" in {
4604 let isCommutable = 0 in
4605 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
4606 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
4607 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
4608 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
4609 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
4610 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
4611 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
4612 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
4613 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
4614 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
4615 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
4618 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
4619 (PCMPEQQrr VR128:$src1, VR128:$src2)>;
4620 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
4621 (PCMPEQQrm VR128:$src1, addr:$src2)>;
4623 /// SS48I_binop_rm - Simple SSE41 binary operator.
4624 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4625 ValueType OpVT, bit Is2Addr = 1> {
4626 let isCommutable = 1 in
4627 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4628 (ins VR128:$src1, VR128:$src2),
4630 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4631 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4632 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
4634 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4635 (ins VR128:$src1, i128mem:$src2),
4637 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4638 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4639 [(set VR128:$dst, (OpNode VR128:$src1,
4640 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
4644 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4645 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
4646 let Constraints = "$src1 = $dst" in
4647 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
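// Illustrative C usage (sketch; variable names are examples only): pmulld
// backs _mm_mullo_epi32 (low 32 bits of each 32x32 product), while pmuldq
// above backs _mm_mul_epi32 (full signed 64-bit products of the even lanes):
//   __m128i lo   = _mm_mullo_epi32(a, b);
//   __m128i wide = _mm_mul_epi32(a, b);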
4649 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
4650 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
4651 Intrinsic IntId128, bit Is2Addr = 1> {
4652 let isCommutable = 1 in
4653 def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4654 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4656 !strconcat(OpcodeStr,
4657 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4658 !strconcat(OpcodeStr,
4659 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4661 (IntId128 VR128:$src1, VR128:$src2, imm:$src3))]>,
4663 def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4664 (ins VR128:$src1, i128mem:$src2, i32i8imm:$src3),
4666 !strconcat(OpcodeStr,
4667 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4668 !strconcat(OpcodeStr,
4669 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4671 (IntId128 VR128:$src1,
4672 (bitconvert (memopv16i8 addr:$src2)), imm:$src3))]>,
4676 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4677 let isCommutable = 0 in {
4678 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
4680 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
4682 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
4684 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
4687 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
4689 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
4693 let Constraints = "$src1 = $dst" in {
4694 let isCommutable = 0 in {
4695 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps>;
4696 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd>;
4697 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw>;
4698 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw>;
4700 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps>;
4701 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd>;
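// Illustrative C usage (sketch; variable names are examples only): dpps backs
// _mm_dp_ps from <smmintrin.h>; the high nibble of the immediate selects
// which lanes are multiplied and summed, the low nibble selects which result
// lanes receive the sum:
//   __m128 dot = _mm_dp_ps(a, b, 0xF1);  // 4-element dot product into lane 0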
4704 /// SS41I_quaternary_int_avx - AVX SSE 4.1 instructions with 4 operands
4705 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4706 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr> {
4707 def rr : I<opc, MRMSrcReg, (outs VR128:$dst),
4708 (ins VR128:$src1, VR128:$src2, VR128:$src3),
4709 !strconcat(OpcodeStr,
4710 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4711 [], SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4713 def rm : I<opc, MRMSrcMem, (outs VR128:$dst),
4714 (ins VR128:$src1, i128mem:$src2, VR128:$src3),
4715 !strconcat(OpcodeStr,
4716 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4717 [], SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4721 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd">;
4722 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps">;
4723 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb">;
4725 /// SS41I_ternary_int - SSE 4.1 ternary operator
4726 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
4727 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4728 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4729 (ins VR128:$src1, VR128:$src2),
4730 !strconcat(OpcodeStr,
4731 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4732 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
4735 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4736 (ins VR128:$src1, i128mem:$src2),
4737 !strconcat(OpcodeStr,
4738 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4741 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
4745 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
4746 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
4747 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
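// Illustrative C usage (sketch; variable names are examples only): these back
// _mm_blendv_ps / _mm_blendv_pd / _mm_blendv_epi8; the non-AVX encodings take
// the selector implicitly in XMM0 (hence Uses = [XMM0] above):
//   __m128i r = _mm_blendv_epi8(a, b, mask);  // per byte: MSB of mask picks b, else a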
4749 // ptest: X86ISelLowering lowers to this node, primarily from the Intel
4750 // intrinsic that corresponds to it.
4751 let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4752 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4753 "vptest\t{$src2, $src1|$src1, $src2}",
4754 [(set EFLAGS, (X86ptest VR128:$src1, VR128:$src2))]>,
4756 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
4757 "vptest\t{$src2, $src1|$src1, $src2}",
4758 [(set EFLAGS, (X86ptest VR128:$src1, (load addr:$src2)))]>,
4762 let Defs = [EFLAGS] in {
4763 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4764 "ptest \t{$src2, $src1|$src1, $src2}",
4765 [(set EFLAGS, (X86ptest VR128:$src1, VR128:$src2))]>,
4767 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
4768 "ptest \t{$src2, $src1|$src1, $src2}",
4769 [(set EFLAGS, (X86ptest VR128:$src1, (load addr:$src2)))]>,
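// Illustrative C usage (sketch; variable names are examples only): ptest
// backs the SSE4.1 test intrinsics, which read the EFLAGS set by this node:
//   if (_mm_testz_si128(a, b)) { /* ZF set: (a & b) == 0  */ }
//   if (_mm_testc_si128(a, b)) { /* CF set: (~a & b) == 0 */ }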
4773 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4774 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4775 "vmovntdqa\t{$src, $dst|$dst, $src}",
4776 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4778 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4779 "movntdqa\t{$src, $dst|$dst, $src}",
4780 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
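// Illustrative C usage (sketch; variable names are examples only): movntdqa
// backs _mm_stream_load_si128, a non-temporal aligned load intended for
// write-combining memory:
//   __m128i v = _mm_stream_load_si128((__m128i *)p);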
4783 //===----------------------------------------------------------------------===//
4784 // SSE4.2 - Compare Instructions
4785 //===----------------------------------------------------------------------===//
4787 /// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
4788 multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
4789 Intrinsic IntId128, bit Is2Addr = 1> {
4790 def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
4791 (ins VR128:$src1, VR128:$src2),
4793 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4794 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4795 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
4797 def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
4798 (ins VR128:$src1, i128mem:$src2),
4800 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4801 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4803 (IntId128 VR128:$src1,
4804 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4807 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE42] in
4808 defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
4810 let Constraints = "$src1 = $dst" in
4811 defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
4813 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
4814 (PCMPGTQrr VR128:$src1, VR128:$src2)>;
4815 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
4816 (PCMPGTQrm VR128:$src1, addr:$src2)>;
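// Illustrative C usage (sketch; variable names are examples only): pcmpgtq
// backs _mm_cmpgt_epi64 from <nmmintrin.h>, a signed 64-bit element compare:
//   __m128i gt = _mm_cmpgt_epi64(a, b);  // all-ones in lanes where a > b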
4818 //===----------------------------------------------------------------------===//
4819 // SSE4.2 - String/text Processing Instructions
4820 //===----------------------------------------------------------------------===//
4822 // Packed Compare Implicit Length Strings, Return Mask
4823 let Defs = [EFLAGS], usesCustomInserter = 1 in {
4824 def PCMPISTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst),
4825 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4826 "#PCMPISTRM128rr PSEUDO!",
4827 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
4828 imm:$src3))]>, OpSize;
4829 def PCMPISTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst),
4830 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4831 "#PCMPISTRM128rm PSEUDO!",
4832 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
4833 VR128:$src1, (load addr:$src2), imm:$src3))]>, OpSize;
4836 let Defs = [XMM0, EFLAGS], isAsmParserOnly = 1,
4837 Predicates = [HasAVX, HasSSE42] in {
4838 def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4839 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4840 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4841 def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4842 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4843 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4846 let Defs = [XMM0, EFLAGS] in {
4847 def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4848 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4849 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4850 def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4851 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4852 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4855 // Packed Compare Explicit Length Strings, Return Mask
4856 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
4857 def PCMPESTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst),
4858 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4859 "#PCMPESTRM128rr PSEUDO!",
4861 (int_x86_sse42_pcmpestrm128
4862 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>, OpSize;
4864 def PCMPESTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst),
4865 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4866 "#PCMPESTRM128rm PSEUDO!",
4867 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4868 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>,
4872 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE42],
4873 Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4874 def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4875 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4876 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4877 def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4878 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4879 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4882 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4883 def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4884 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4885 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4886 def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4887 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4888 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4891 // Packed Compare Implicit Length Strings, Return Index
4892 let Defs = [ECX, EFLAGS] in {
4893 multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
4894 def rr : SS42AI<0x63, MRMSrcReg, (outs),
4895 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4896 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4897 [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
4898 (implicit EFLAGS)]>, OpSize;
4899 def rm : SS42AI<0x63, MRMSrcMem, (outs),
4900 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4901 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4902 [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
4903 (implicit EFLAGS)]>, OpSize;
4907 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE42] in {
4908 defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
4910 defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
4912 defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
4914 defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
4916 defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
4918 defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
4922 defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
4923 defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
4924 defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
4925 defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
4926 defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
4927 defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
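// Illustrative C usage (sketch; variable names are examples only): pcmpistri
// backs _mm_cmpistri from <nmmintrin.h>; the immediate selects element size,
// comparison mode, and polarity, and the result index comes back in ECX:
//   int idx = _mm_cmpistri(needle, haystack,
//                          _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED);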
4929 // Packed Compare Explicit Length Strings, Return Index
4930 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
4931 multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
4932 def rr : SS42AI<0x61, MRMSrcReg, (outs),
4933 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4934 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
4935 [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
4936 (implicit EFLAGS)]>, OpSize;
4937 def rm : SS42AI<0x61, MRMSrcMem, (outs),
4938 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4939 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
4941 (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
4942 (implicit EFLAGS)]>, OpSize;
4946 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE42] in {
4947 defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
4949 defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
4951 defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
4953 defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
4955 defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
4957 defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
4961 defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
4962 defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
4963 defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
4964 defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
4965 defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
4966 defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
4968 //===----------------------------------------------------------------------===//
4969 // SSE4.2 - CRC Instructions
4970 //===----------------------------------------------------------------------===//
4972 // No CRC instructions have AVX equivalents
4974 // crc intrinsic instructions
4975 // These definitions differ only in the size of the register / memory source.
4977 let Constraints = "$src1 = $dst" in {
4978 def CRC32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
4979 (ins GR32:$src1, i8mem:$src2),
4980 "crc32{b} \t{$src2, $src1|$src1, $src2}",
4982 (int_x86_sse42_crc32_8 GR32:$src1,
4983 (load addr:$src2)))]>;
4984 def CRC32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
4985 (ins GR32:$src1, GR8:$src2),
4986 "crc32{b} \t{$src2, $src1|$src1, $src2}",
4988 (int_x86_sse42_crc32_8 GR32:$src1, GR8:$src2))]>;
4989 def CRC32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
4990 (ins GR32:$src1, i16mem:$src2),
4991 "crc32{w} \t{$src2, $src1|$src1, $src2}",
4993 (int_x86_sse42_crc32_16 GR32:$src1,
4994 (load addr:$src2)))]>,
4996 def CRC32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
4997 (ins GR32:$src1, GR16:$src2),
4998 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5000 (int_x86_sse42_crc32_16 GR32:$src1, GR16:$src2))]>,
5002 def CRC32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5003 (ins GR32:$src1, i32mem:$src2),
5004 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5006 (int_x86_sse42_crc32_32 GR32:$src1,
5007 (load addr:$src2)))]>;
5008 def CRC32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5009 (ins GR32:$src1, GR32:$src2),
5010 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5012 (int_x86_sse42_crc32_32 GR32:$src1, GR32:$src2))]>;
5013 def CRC64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
5014 (ins GR64:$src1, i8mem:$src2),
5015 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5017 (int_x86_sse42_crc64_8 GR64:$src1,
5018 (load addr:$src2)))]>,
5020 def CRC64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
5021 (ins GR64:$src1, GR8:$src2),
5022 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5024 (int_x86_sse42_crc64_8 GR64:$src1, GR8:$src2))]>,
5026 def CRC64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
5027 (ins GR64:$src1, i64mem:$src2),
5028 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5030 (int_x86_sse42_crc64_64 GR64:$src1,
5031 (load addr:$src2)))]>,
5033 def CRC64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
5034 (ins GR64:$src1, GR64:$src2),
5035 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5037 (int_x86_sse42_crc64_64 GR64:$src1, GR64:$src2))]>,
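// Illustrative C usage (sketch; variable names are examples only): these back
// the _mm_crc32_u8/u16/u32/u64 intrinsics from <nmmintrin.h>, which
// accumulate a CRC-32C (Castagnoli polynomial) checksum:
//   unsigned int crc = 0;
//   crc = _mm_crc32_u8(crc, byte);
//   crc = _mm_crc32_u32(crc, word);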
5041 //===----------------------------------------------------------------------===//
5042 // AES-NI Instructions
5043 //===----------------------------------------------------------------------===//
5045 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
5046 Intrinsic IntId128, bit Is2Addr = 1> {
5047 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
5048 (ins VR128:$src1, VR128:$src2),
5050 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5051 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5052 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5054 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
5055 (ins VR128:$src1, i128mem:$src2),
5057 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5058 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5060 (IntId128 VR128:$src1,
5061 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5064 // Perform One Round of an AES Encryption/Decryption Flow
5065 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5066 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
5067 int_x86_aesni_aesenc, 0>, VEX_4V;
5068 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
5069 int_x86_aesni_aesenclast, 0>, VEX_4V;
5070 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
5071 int_x86_aesni_aesdec, 0>, VEX_4V;
5072 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
5073 int_x86_aesni_aesdeclast, 0>, VEX_4V;
5076 let Constraints = "$src1 = $dst" in {
5077 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
5078 int_x86_aesni_aesenc>;
5079 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
5080 int_x86_aesni_aesenclast>;
5081 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
5082 int_x86_aesni_aesdec>;
5083 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
5084 int_x86_aesni_aesdeclast>;
5087 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
5088 (AESENCrr VR128:$src1, VR128:$src2)>;
5089 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
5090 (AESENCrm VR128:$src1, addr:$src2)>;
5091 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
5092 (AESENCLASTrr VR128:$src1, VR128:$src2)>;
5093 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
5094 (AESENCLASTrm VR128:$src1, addr:$src2)>;
5095 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
5096 (AESDECrr VR128:$src1, VR128:$src2)>;
5097 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
5098 (AESDECrm VR128:$src1, addr:$src2)>;
5099 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
5100 (AESDECLASTrr VR128:$src1, VR128:$src2)>;
5101 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
5102 (AESDECLASTrm VR128:$src1, addr:$src2)>;
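// Illustrative C usage (sketch; variable names are examples only): these back
// the AES-NI intrinsics from <wmmintrin.h>; one encryption round of a 128-bit
// state with a round key looks like:
//   state = _mm_aesenc_si128(state, round_key);      // middle rounds
//   state = _mm_aesenclast_si128(state, last_key);   // final round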
5104 // Perform the AES InvMixColumn Transformation
5105 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5106 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5108 "vaesimc\t{$src1, $dst|$dst, $src1}",
5110 (int_x86_aesni_aesimc VR128:$src1))]>,
5112 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5113 (ins i128mem:$src1),
5114 "vaesimc\t{$src1, $dst|$dst, $src1}",
5116 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5119 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5121 "aesimc\t{$src1, $dst|$dst, $src1}",
5123 (int_x86_aesni_aesimc VR128:$src1))]>,
5125 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5126 (ins i128mem:$src1),
5127 "aesimc\t{$src1, $dst|$dst, $src1}",
5129 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5132 // AES Round Key Generation Assist
5133 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5134 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5135 (ins VR128:$src1, i8imm:$src2),
5136 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5138 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5140 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5141 (ins i128mem:$src1, i8imm:$src2),
5142 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5144 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5148 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5149 (ins VR128:$src1, i8imm:$src2),
5150 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5152 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5154 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5155 (ins i128mem:$src1, i8imm:$src2),
5156 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5158 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),