1 //====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions
11 // and the properties of those instructions that are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
17 //===----------------------------------------------------------------------===//
18 // SSE specific DAG Nodes.
19 //===----------------------------------------------------------------------===//
21 def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
22 SDTCisFP<0>, SDTCisInt<2> ]>;
23 def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
24 SDTCisFP<1>, SDTCisVT<3, i8>]>;
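// Reading the two profiles above: SDTX86FPShiftOp is an FP value shifted by an
// integer amount, with the result typed like the first operand; SDTX86VFCMP is
// a packed FP compare that produces an integer vector mask and takes the
// condition code as an i8 immediate (operand 3).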
26 def X86fmin : SDNode<"X86ISD::FMIN", SDTFPBinOp>;
27 def X86fmax : SDNode<"X86ISD::FMAX", SDTFPBinOp>;
28 def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp,
29 [SDNPCommutative, SDNPAssociative]>;
30 def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp,
31 [SDNPCommutative, SDNPAssociative]>;
32 def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp,
33 [SDNPCommutative, SDNPAssociative]>;
34 def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
35 def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
36 def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
37 def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
38 def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
39 def X86pshufb : SDNode<"X86ISD::PSHUFB",
40 SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
41 SDTCisVT<2, v16i8>]>>;
42 def X86pextrb : SDNode<"X86ISD::PEXTRB",
43 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
44 def X86pextrw : SDNode<"X86ISD::PEXTRW",
45 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
46 def X86pinsrb : SDNode<"X86ISD::PINSRB",
47 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
48 SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
49 def X86pinsrw : SDNode<"X86ISD::PINSRW",
50 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
51 SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
52 def X86insrtps : SDNode<"X86ISD::INSERTPS",
53 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
54 SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
55 def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
56 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
57 def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
58 [SDNPHasChain, SDNPMayLoad]>;
59 def X86vshl : SDNode<"X86ISD::VSHL", SDTIntShiftOp>;
60 def X86vshr : SDNode<"X86ISD::VSRL", SDTIntShiftOp>;
61 def X86cmpps : SDNode<"X86ISD::CMPPS", SDTX86VFCMP>;
62 def X86cmppd : SDNode<"X86ISD::CMPPD", SDTX86VFCMP>;
63 def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
64 def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
65 def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
66 def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
67 def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
68 def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
69 def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
70 def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;
72 def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
73 SDTCisVT<1, v4f32>,
74 SDTCisVT<2, v4f32>]>;
75 def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
77 //===----------------------------------------------------------------------===//
78 // SSE Complex Patterns
79 //===----------------------------------------------------------------------===//
81 // These are 'extloads' from a scalar to the low element of a vector, zeroing
82 // the top elements. These are used for the SSE 'ss' and 'sd' instruction
83 // forms.
84 def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
85 [SDNPHasChain, SDNPMayLoad]>;
86 def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
87 [SDNPHasChain, SDNPMayLoad]>;
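// Illustrative use only (not a definition from this file): a scalar intrinsic
// pattern such as
//   (int_x86_sse_add_ss VR128:$src1, sse_load_f32:$src2)
// lets the selector fold the scalar load into the 'ss' memory form of the
// instruction, since only the low element of the loaded vector is observed.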
89 def ssmem : Operand<v4f32> {
90 let PrintMethod = "printf32mem";
91 let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
92 let ParserMatchClass = X86MemAsmOperand;
93 }
94 def sdmem : Operand<v2f64> {
95 let PrintMethod = "printf64mem";
96 let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
97 let ParserMatchClass = X86MemAsmOperand;
98 }
100 //===----------------------------------------------------------------------===//
101 // SSE pattern fragments
102 //===----------------------------------------------------------------------===//
104 def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
105 def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
106 def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
107 def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
109 // FIXME: move this to a more appropriate place after all AVX is done.
110 def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
111 def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
112 def loadv8i32 : PatFrag<(ops node:$ptr), (v8i32 (load node:$ptr))>;
113 def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;
115 // Like 'store', but always requires vector alignment.
116 def alignedstore : PatFrag<(ops node:$val, node:$ptr),
117 (store node:$val, node:$ptr), [{
118 return cast<StoreSDNode>(N)->getAlignment() >= 16;
119 }]>;
121 // Like 'load', but always requires vector alignment.
122 def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
123 return cast<LoadSDNode>(N)->getAlignment() >= 16;
124 }]>;
126 def alignedloadfsf32 : PatFrag<(ops node:$ptr),
127 (f32 (alignedload node:$ptr))>;
128 def alignedloadfsf64 : PatFrag<(ops node:$ptr),
129 (f64 (alignedload node:$ptr))>;
130 def alignedloadv4f32 : PatFrag<(ops node:$ptr),
131 (v4f32 (alignedload node:$ptr))>;
132 def alignedloadv2f64 : PatFrag<(ops node:$ptr),
133 (v2f64 (alignedload node:$ptr))>;
134 def alignedloadv4i32 : PatFrag<(ops node:$ptr),
135 (v4i32 (alignedload node:$ptr))>;
136 def alignedloadv2i64 : PatFrag<(ops node:$ptr),
137 (v2i64 (alignedload node:$ptr))>;
139 // FIXME: move this to a more appropriate place after all AVX is done.
140 def alignedloadv8f32 : PatFrag<(ops node:$ptr),
141 (v8f32 (alignedload node:$ptr))>;
142 def alignedloadv4f64 : PatFrag<(ops node:$ptr),
143 (v4f64 (alignedload node:$ptr))>;
144 def alignedloadv8i32 : PatFrag<(ops node:$ptr),
145 (v8i32 (alignedload node:$ptr))>;
146 def alignedloadv4i64 : PatFrag<(ops node:$ptr),
147 (v4i64 (alignedload node:$ptr))>;
149 // Like 'load', but uses special alignment checks suitable for use in
150 // memory operands in most SSE instructions, which are required to
151 // be naturally aligned on some targets but not on others. If the subtarget
152 // allows unaligned accesses, match any load, though this may require
153 // setting a feature bit in the processor (on startup, for example).
154 // Opteron 10h and later implement such a feature.
155 def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
156 return Subtarget->hasVectorUAMem()
157 || cast<LoadSDNode>(N)->getAlignment() >= 16;
158 }]>;
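// In other words, a 16-byte aligned load always matches, and on subtargets
// that report hasVectorUAMem() even an unaligned load may be folded into an
// SSE memory operand.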
160 def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
161 def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
162 def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
163 def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
164 def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
165 def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
166 def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
168 // SSSE3 uses MMX registers for some instructions. They aren't aligned on a
169 // 16-byte boundary.
170 // FIXME: 8 byte alignment for mmx reads is not required
171 def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
172 return cast<LoadSDNode>(N)->getAlignment() >= 8;
173 }]>;
175 def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;
176 def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
177 def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
178 def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;
181 // Like 'store', but requires the non-temporal bit to be set
182 def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
183 (st node:$val, node:$ptr), [{
184 if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
185 return ST->isNonTemporal();
186 return false;
187 }]>;
189 def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
190 (st node:$val, node:$ptr), [{
191 if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
192 return ST->isNonTemporal() && !ST->isTruncatingStore() &&
193 ST->getAddressingMode() == ISD::UNINDEXED &&
194 ST->getAlignment() >= 16;
195 return false;
196 }]>;
198 def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
199 (st node:$val, node:$ptr), [{
200 if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
201 return ST->isNonTemporal() &&
202 ST->getAlignment() < 16;
203 return false;
204 }]>;
206 def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
207 def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
208 def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
209 def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
210 def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
211 def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
213 def vzmovl_v2i64 : PatFrag<(ops node:$src),
214 (bitconvert (v2i64 (X86vzmovl
215 (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
216 def vzmovl_v4i32 : PatFrag<(ops node:$src),
217 (bitconvert (v4i32 (X86vzmovl
218 (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;
220 def vzload_v2i64 : PatFrag<(ops node:$src),
221 (bitconvert (v2i64 (X86vzload node:$src)))>;
224 def fp32imm0 : PatLeaf<(f32 fpimm), [{
225 return N->isExactlyValue(+0.0);
226 }]>;
228 // BYTE_imm - Transform bit immediates into byte immediates.
229 def BYTE_imm : SDNodeXForm<imm, [{
230 // Transformation function: imm >> 3
231 return getI32Imm(N->getZExtValue() >> 3);
232 }]>;
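// Worked example: a shift amount of 16 (bits) becomes the byte immediate 2
// (16 >> 3). This is how bit-granular shift amounts are rewritten for the
// byte-granular whole-register shifts (PSLLDQ/PSRLDQ) selected from X86vshl
// and X86vshr.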
234 // SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
235 // SHUFP* etc. imm.
236 def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
237 return getI8Imm(X86::getShuffleSHUFImmediate(N));
238 }]>;
240 // SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
241 // PSHUFHW imm.
242 def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
243 return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
244 }]>;
246 // SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
247 // PSHUFLW imm.
248 def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
249 return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
250 }]>;
252 // SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
253 // a PALIGNR imm.
254 def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
255 return getI8Imm(X86::getShufflePALIGNRImmediate(N));
256 }]>;
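// Example: for PSHUFD, destination element i is selected by immediate bits
// [2*i+1 : 2*i], so the identity mask <0,1,2,3> is encoded as 0xE4
// (0b11100100).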
258 def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
259 (vector_shuffle node:$lhs, node:$rhs), [{
260 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
261 return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
262 }]>;
264 def movddup : PatFrag<(ops node:$lhs, node:$rhs),
265 (vector_shuffle node:$lhs, node:$rhs), [{
266 return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
267 }]>;
269 def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
270 (vector_shuffle node:$lhs, node:$rhs), [{
271 return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
272 }]>;
274 def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
275 (vector_shuffle node:$lhs, node:$rhs), [{
276 return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
277 }]>;
279 def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
280 (vector_shuffle node:$lhs, node:$rhs), [{
281 return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
282 }]>;
284 def movlp : PatFrag<(ops node:$lhs, node:$rhs),
285 (vector_shuffle node:$lhs, node:$rhs), [{
286 return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
287 }]>;
289 def movl : PatFrag<(ops node:$lhs, node:$rhs),
290 (vector_shuffle node:$lhs, node:$rhs), [{
291 return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
292 }]>;
294 def movshdup : PatFrag<(ops node:$lhs, node:$rhs),
295 (vector_shuffle node:$lhs, node:$rhs), [{
296 return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
297 }]>;
299 def movsldup : PatFrag<(ops node:$lhs, node:$rhs),
300 (vector_shuffle node:$lhs, node:$rhs), [{
301 return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
302 }]>;
304 def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
305 (vector_shuffle node:$lhs, node:$rhs), [{
306 return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
307 }]>;
309 def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
310 (vector_shuffle node:$lhs, node:$rhs), [{
311 return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
312 }]>;
314 def unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
315 (vector_shuffle node:$lhs, node:$rhs), [{
316 return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
317 }]>;
319 def unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
320 (vector_shuffle node:$lhs, node:$rhs), [{
321 return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
322 }]>;
324 def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
325 (vector_shuffle node:$lhs, node:$rhs), [{
326 return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
327 }], SHUFFLE_get_shuf_imm>;
329 def shufp : PatFrag<(ops node:$lhs, node:$rhs),
330 (vector_shuffle node:$lhs, node:$rhs), [{
331 return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
332 }], SHUFFLE_get_shuf_imm>;
334 def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
335 (vector_shuffle node:$lhs, node:$rhs), [{
336 return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
337 }], SHUFFLE_get_pshufhw_imm>;
339 def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
340 (vector_shuffle node:$lhs, node:$rhs), [{
341 return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
342 }], SHUFFLE_get_pshuflw_imm>;
344 def palign : PatFrag<(ops node:$lhs, node:$rhs),
345 (vector_shuffle node:$lhs, node:$rhs), [{
346 return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
347 }], SHUFFLE_get_palign_imm>;
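// Each fragment above matches a generic vector_shuffle only when the
// corresponding X86::is*Mask predicate accepts its mask; the fragments that
// carry an SDNodeXForm additionally convert the matched mask into the
// instruction's immediate operand.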
349 //===----------------------------------------------------------------------===//
350 // SSE scalar FP Instructions
351 //===----------------------------------------------------------------------===//
353 // CMOV* - Used to implement the SSE SELECT DAG operation. Expanded after
354 // instruction selection into a branch sequence.
355 let Uses = [EFLAGS], usesCustomInserter = 1 in {
356 def CMOV_FR32 : I<0, Pseudo,
357 (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
358 "#CMOV_FR32 PSEUDO!",
359 [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
360 EFLAGS))]>;
361 def CMOV_FR64 : I<0, Pseudo,
362 (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
363 "#CMOV_FR64 PSEUDO!",
364 [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
365 EFLAGS))]>;
366 def CMOV_V4F32 : I<0, Pseudo,
367 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
368 "#CMOV_V4F32 PSEUDO!",
369 [(set VR128:$dst,
370 (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
371 EFLAGS)))]>;
372 def CMOV_V2F64 : I<0, Pseudo,
373 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
374 "#CMOV_V2F64 PSEUDO!",
375 [(set VR128:$dst,
376 (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
377 EFLAGS)))]>;
378 def CMOV_V2I64 : I<0, Pseudo,
379 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
380 "#CMOV_V2I64 PSEUDO!",
381 [(set VR128:$dst,
382 (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
383 EFLAGS)))]>;
384 }
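// Rough sketch of the custom-inserter expansion (pseudo-code, not the literal
// emitted MachineInstrs):
//   thisMBB:   Jcc sinkMBB            ; branch on the condition held in EFLAGS
//   copy0MBB:  (fall-through)         ; the "false" path, no copy needed
//   sinkMBB:   $dst = PHI [$f, copy0MBB], [$t, thisMBB]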
386 //===----------------------------------------------------------------------===//
387 // SSE 1 & 2 Instructions Classes
388 //===----------------------------------------------------------------------===//
390 /// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
391 multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
392 RegisterClass RC, X86MemOperand x86memop> {
393 let isCommutable = 1 in {
394 def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
395 OpcodeStr, [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
396 }
397 def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
398 OpcodeStr, [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
399 }
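// Hypothetical instantiation, for exposition only (the defm name and asm
// string below are illustrative, not definitions from this file):
//   defm ADDSS : sse12_fp_scalar<0x58, "addss\t{$src2, $dst|$dst, $src2}",
//                                fadd, FR32, f32mem>, XS;
// would expand to ADDSSrr (commutable register form) and ADDSSrm (register
// form with a folded scalar load).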
401 /// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
402 multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
403 string asm, string SSEVer, string FPSizeStr,
404 Operand memopr, ComplexPattern mem_cpat> {
405 def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
406 asm, [(set RC:$dst, (
407 !nameconcat<Intrinsic>("int_x86_sse",
408 !strconcat(SSEVer, !strconcat("_",
409 !strconcat(OpcodeStr, FPSizeStr))))
410 RC:$src1, RC:$src2))]>;
411 def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
412 asm, [(set RC:$dst, (
413 !nameconcat<Intrinsic>("int_x86_sse",
414 !strconcat(SSEVer, !strconcat("_",
415 !strconcat(OpcodeStr, FPSizeStr))))
416 RC:$src1, mem_cpat:$src2))]>;
417 }
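// The intrinsic name is assembled from the string parameters. With the
// illustrative values SSEVer = "2", OpcodeStr = "add" and FPSizeStr = "_sd",
// the !nameconcat expression above resolves to int_x86_sse2_add_sd.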
419 /// sse12_fp_packed - SSE 1 & 2 packed instructions class
420 multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
421 RegisterClass RC, ValueType vt,
422 X86MemOperand x86memop, PatFrag mem_frag,
423 Domain d, bit MayLoad = 0> {
424 let isCommutable = 1 in
425 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
426 OpcodeStr, [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))],d>;
427 let mayLoad = MayLoad in
428 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
429 OpcodeStr, [(set RC:$dst, (OpNode RC:$src1,
430 (mem_frag addr:$src2)))],d>;
431 }
433 /// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
434 multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
435 string OpcodeStr, X86MemOperand x86memop,
436 list<dag> pat_rr, list<dag> pat_rm> {
437 let isCommutable = 1 in
438 def rr : PI<opc, MRMSrcReg, (outs RC:$dst),
439 (ins RC:$src1, RC:$src2), OpcodeStr, pat_rr, d>;
440 def rm : PI<opc, MRMSrcMem, (outs RC:$dst),
441 (ins RC:$src1, x86memop:$src2), OpcodeStr, pat_rm, d>;
442 }
444 /// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
445 multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
446 string asm, string SSEVer, string FPSizeStr,
447 X86MemOperand x86memop, PatFrag mem_frag,
448 Domain d> {
449 def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
450 asm, [(set RC:$dst, (
451 !nameconcat<Intrinsic>("int_x86_sse",
452 !strconcat(SSEVer, !strconcat("_",
453 !strconcat(OpcodeStr, FPSizeStr))))
454 RC:$src1, RC:$src2))], d>;
455 def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
456 asm, [(set RC:$dst, (
457 !nameconcat<Intrinsic>("int_x86_sse",
458 !strconcat(SSEVer, !strconcat("_",
459 !strconcat(OpcodeStr, FPSizeStr))))
460 RC:$src1, (mem_frag addr:$src2)))], d>;
461 }
463 //===----------------------------------------------------------------------===//
464 // SSE 1 & 2 - Move Instructions
465 //===----------------------------------------------------------------------===//
467 class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
468 SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
469 [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;
471 // Loading from memory automatically zeroes the upper bits.
472 class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
473 PatFrag mem_pat, string OpcodeStr> :
474 SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
475 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
476 [(set RC:$dst, (mem_pat addr:$src))]>;
478 // Move Instructions. Register-to-register movss/movsd is not used for FR32/64
479 // register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
480 // is used instead. Register-to-register movss/movsd is not modeled as an
481 // INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
482 // in terms of a copy, and, as just mentioned, we don't use movss/movsd for copies.
483 let isAsmParserOnly = 1 in {
484 def VMOVSSrr : sse12_move_rr<FR32, v4f32,
485 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
486 def VMOVSDrr : sse12_move_rr<FR64, v2f64,
487 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;
489 let canFoldAsLoad = 1, isReMaterializable = 1 in {
490 def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;
492 let AddedComplexity = 20 in
493 def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
494 }
495 }
497 let Constraints = "$src1 = $dst" in {
498 def MOVSSrr : sse12_move_rr<FR32, v4f32,
499 "movss\t{$src2, $dst|$dst, $src2}">, XS;
500 def MOVSDrr : sse12_move_rr<FR64, v2f64,
501 "movsd\t{$src2, $dst|$dst, $src2}">, XD;
504 let canFoldAsLoad = 1, isReMaterializable = 1 in {
505 def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;
507 let AddedComplexity = 20 in
508 def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
509 }
511 let AddedComplexity = 15 in {
512 // Extract the low 32-bit value from one vector and insert it into another.
513 def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
514 (MOVSSrr (v4f32 VR128:$src1),
515 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
516 // Extract the low 64-bit value from one vector and insert it into another.
517 def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
518 (MOVSDrr (v2f64 VR128:$src1),
519 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
520 }
522 // Implicitly promote a 32-bit scalar to a vector.
523 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
524 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
525 // Implicitly promote a 64-bit scalar to a vector.
526 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
527 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
529 let AddedComplexity = 20 in {
530 // MOVSSrm zeros the high parts of the register; represent this
531 // with SUBREG_TO_REG.
532 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
533 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
534 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
535 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
536 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
537 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
538 // MOVSDrm zeros the high parts of the register; represent this
539 // with SUBREG_TO_REG.
540 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
541 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
542 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
543 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
544 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
545 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
546 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
547 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
548 def : Pat<(v2f64 (X86vzload addr:$src)),
549 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
550 }
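// In these patterns SUBREG_TO_REG with a zero index asserts that the
// instruction producing the subregister value (MOVSSrm / MOVSDrm) already
// cleared the upper lanes, so no explicit zeroing of the full 128-bit
// register needs to be emitted.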
552 // Store scalar value to memory.
553 def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
554 "movss\t{$src, $dst|$dst, $src}",
555 [(store FR32:$src, addr:$dst)]>;
556 def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
557 "movsd\t{$src, $dst|$dst, $src}",
558 [(store FR64:$src, addr:$dst)]>;
560 let isAsmParserOnly = 1 in {
561 def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
562 "movss\t{$src, $dst|$dst, $src}",
563 [(store FR32:$src, addr:$dst)]>, XS, VEX_4V;
564 def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
565 "movsd\t{$src, $dst|$dst, $src}",
566 [(store FR64:$src, addr:$dst)]>, XD, VEX_4V;
567 }
569 // Extract and store.
570 def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
571 addr:$dst),
572 (MOVSSmr addr:$dst,
573 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
574 def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
575 addr:$dst),
576 (MOVSDmr addr:$dst,
577 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
579 // Move Aligned/Unaligned floating point values
580 multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
581 X86MemOperand x86memop, PatFrag ld_frag,
582 string asm, Domain d,
583 bit IsReMaterializable = 1> {
584 let neverHasSideEffects = 1 in
585 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
586 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
587 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
588 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
589 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
590 [(set RC:$dst, (ld_frag addr:$src))], d>;
591 }
593 let isAsmParserOnly = 1 in {
594 defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
595 "movaps", SSEPackedSingle>, VEX;
596 defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
597 "movapd", SSEPackedDouble>, OpSize, VEX;
598 defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
599 "movups", SSEPackedSingle>, VEX;
600 defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
601 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
603 defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
604 "movaps", SSEPackedSingle>, VEX;
605 defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
606 "movapd", SSEPackedDouble>, OpSize, VEX;
607 defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
608 "movups", SSEPackedSingle>, VEX;
609 defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
610 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
612 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
613 "movaps", SSEPackedSingle>, TB;
614 defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
615 "movapd", SSEPackedDouble>, TB, OpSize;
616 defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
617 "movups", SSEPackedSingle>, TB;
618 defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
619 "movupd", SSEPackedDouble, 0>, TB, OpSize;
621 let isAsmParserOnly = 1 in {
622 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
623 "movaps\t{$src, $dst|$dst, $src}",
624 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
625 def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
626 "movapd\t{$src, $dst|$dst, $src}",
627 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
628 def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
629 "movups\t{$src, $dst|$dst, $src}",
630 [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
631 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
632 "movupd\t{$src, $dst|$dst, $src}",
633 [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
634 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
635 "movaps\t{$src, $dst|$dst, $src}",
636 [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
637 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
638 "movapd\t{$src, $dst|$dst, $src}",
639 [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
640 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
641 "movups\t{$src, $dst|$dst, $src}",
642 [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
643 def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
644 "movupd\t{$src, $dst|$dst, $src}",
645 [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
647 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
648 "movaps\t{$src, $dst|$dst, $src}",
649 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
650 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
651 "movapd\t{$src, $dst|$dst, $src}",
652 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
653 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
654 "movups\t{$src, $dst|$dst, $src}",
655 [(store (v4f32 VR128:$src), addr:$dst)]>;
656 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
657 "movupd\t{$src, $dst|$dst, $src}",
658 [(store (v2f64 VR128:$src), addr:$dst)]>;
660 // Intrinsic forms of MOVUPS/D load and store
661 let isAsmParserOnly = 1 in {
662 let canFoldAsLoad = 1, isReMaterializable = 1 in
663 def VMOVUPSrm_Int : VPSI<0x10, MRMSrcMem, (outs VR128:$dst),
665 "movups\t{$src, $dst|$dst, $src}",
666 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>, VEX;
667 def VMOVUPDrm_Int : VPDI<0x10, MRMSrcMem, (outs VR128:$dst),
669 "movupd\t{$src, $dst|$dst, $src}",
670 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>, VEX;
671 def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
672 (ins f128mem:$dst, VR128:$src),
673 "movups\t{$src, $dst|$dst, $src}",
674 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
675 def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
676 (ins f128mem:$dst, VR128:$src),
677 "movupd\t{$src, $dst|$dst, $src}",
678 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
679 }
680 let canFoldAsLoad = 1, isReMaterializable = 1 in
681 def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
682 "movups\t{$src, $dst|$dst, $src}",
683 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
684 def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
685 "movupd\t{$src, $dst|$dst, $src}",
686 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
688 def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
689 "movups\t{$src, $dst|$dst, $src}",
690 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
691 def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
692 "movupd\t{$src, $dst|$dst, $src}",
693 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
695 // Move Low/High packed floating point values
696 multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
697 PatFrag mov_frag, string base_opc,
698 string asm_opr> {
699 def PSrm : PI<opc, MRMSrcMem,
700 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
701 !strconcat(!strconcat(base_opc,"s"), asm_opr),
702 [(set VR128:$dst,
703 (mov_frag VR128:$src1,
704 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
705 SSEPackedSingle>, TB;
707 def PDrm : PI<opc, MRMSrcMem,
708 (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
709 !strconcat(!strconcat(base_opc,"d"), asm_opr),
710 [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
711 (scalar_to_vector (loadf64 addr:$src2)))))],
712 SSEPackedDouble>, TB, OpSize;
713 }
715 let isAsmParserOnly = 1, AddedComplexity = 20 in {
716 defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
717 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
718 defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
719 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
721 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
722 defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
723 "\t{$src2, $dst|$dst, $src2}">;
724 defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
725 "\t{$src2, $dst|$dst, $src2}">;
728 let isAsmParserOnly = 1 in {
729 def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
730 "movlps\t{$src, $dst|$dst, $src}",
731 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
732 (iPTR 0))), addr:$dst)]>, VEX;
733 def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
734 "movlpd\t{$src, $dst|$dst, $src}",
735 [(store (f64 (vector_extract (v2f64 VR128:$src),
736 (iPTR 0))), addr:$dst)]>, VEX;
737 }
738 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
739 "movlps\t{$src, $dst|$dst, $src}",
740 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
741 (iPTR 0))), addr:$dst)]>;
742 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
743 "movlpd\t{$src, $dst|$dst, $src}",
744 [(store (f64 (vector_extract (v2f64 VR128:$src),
745 (iPTR 0))), addr:$dst)]>;
747 // v2f64 extract element 1 is always custom lowered to unpack high to low
748 // and extract element 0 so the non-store version isn't too horrible.
749 let isAsmParserOnly = 1 in {
750 def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
751 "movhps\t{$src, $dst|$dst, $src}",
752 [(store (f64 (vector_extract
753 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
754 (undef)), (iPTR 0))), addr:$dst)]>,
755 VEX;
756 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
757 "movhpd\t{$src, $dst|$dst, $src}",
758 [(store (f64 (vector_extract
759 (v2f64 (unpckh VR128:$src, (undef))),
760 (iPTR 0))), addr:$dst)]>,
761 VEX;
762 }
763 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
764 "movhps\t{$src, $dst|$dst, $src}",
765 [(store (f64 (vector_extract
766 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
767 (undef)), (iPTR 0))), addr:$dst)]>;
768 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
769 "movhpd\t{$src, $dst|$dst, $src}",
770 [(store (f64 (vector_extract
771 (v2f64 (unpckh VR128:$src, (undef))),
772 (iPTR 0))), addr:$dst)]>;
774 let isAsmParserOnly = 1, AddedComplexity = 20 in {
775 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
776 (ins VR128:$src1, VR128:$src2),
777 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
778 [(set VR128:$dst,
779 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
780 VEX_4V;
781 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
782 (ins VR128:$src1, VR128:$src2),
783 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
784 [(set VR128:$dst,
785 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
786 VEX_4V;
787 }
788 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
789 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
790 (ins VR128:$src1, VR128:$src2),
791 "movlhps\t{$src2, $dst|$dst, $src2}",
792 [(set VR128:$dst,
793 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
794 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
795 (ins VR128:$src1, VR128:$src2),
796 "movhlps\t{$src2, $dst|$dst, $src2}",
797 [(set VR128:$dst,
798 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
799 }
801 def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
802 (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
803 let AddedComplexity = 20 in {
804 def : Pat<(v4f32 (movddup VR128:$src, (undef))),
805 (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
806 def : Pat<(v2i64 (movddup VR128:$src, (undef))),
807 (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
808 }
810 //===----------------------------------------------------------------------===//
811 // SSE 1 & 2 - Conversion Instructions
812 //===----------------------------------------------------------------------===//
814 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
815 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
816 string asm> {
817 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
818 [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
819 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
820 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
821 }
823 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
824 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
825 string asm, Domain d> {
826 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
827 [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
828 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
829 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
830 }
832 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
833 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
834 string asm> {
835 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
836 asm, []>;
837 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
838 (ins DstRC:$src1, x86memop:$src), asm, []>;
839 }
841 let isAsmParserOnly = 1 in {
842 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
843 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
844 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
845 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
846 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
847 "cvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}">, XS,
849 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
850 "cvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}">, XD,
854 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
855 "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
856 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
857 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
858 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
859 "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
860 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
861 "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
863 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
864 // and/or XMM operand(s).
865 multiclass sse12_cvt_pint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
866 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
867 string asm, Domain d> {
868 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
869 [(set DstRC:$dst, (Int SrcRC:$src))], d>;
870 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
871 [(set DstRC:$dst, (Int (ld_frag addr:$src)))], d>;
872 }
874 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
875 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
876 string asm> {
877 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
878 [(set DstRC:$dst, (Int SrcRC:$src))]>;
879 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
880 [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
881 }
883 multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC,
884 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
885 PatFrag ld_frag, string asm, Domain d> {
886 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
887 asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))], d>;
888 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst),
889 (ins DstRC:$src1, x86memop:$src2), asm,
890 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))], d>;
891 }
893 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
894 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
895 PatFrag ld_frag, string asm> {
896 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
897 asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
898 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
899 (ins DstRC:$src1, x86memop:$src2), asm,
900 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
901 }
903 let isAsmParserOnly = 1 in {
904 defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
905 f32mem, load, "cvtss2si\t{$src, $dst|$dst, $src}">, XS,
907 defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
908 f128mem, load, "cvtsd2si\t{$src, $dst|$dst, $src}">, XD,
911 defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
912 f32mem, load, "cvtss2si\t{$src, $dst|$dst, $src}">, XS;
913 defm Int_CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
914 f128mem, load, "cvtsd2si\t{$src, $dst|$dst, $src}">, XD;
917 let Constraints = "$src1 = $dst" in {
918 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
919 int_x86_sse_cvtsi2ss, i32mem, loadi32,
920 "cvtsi2ss\t{$src2, $dst|$dst, $src2}">, XS;
921 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
922 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
923 "cvtsi2ss\t{$src2, $dst|$dst, $src2}">, XD;
926 // Instructions below don't have an AVX form.
927 defm Int_CVTPS2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi,
928 f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}",
929 SSEPackedSingle>, TB;
930 defm Int_CVTPD2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtpd2pi,
931 f128mem, memop, "cvtpd2pi\t{$src, $dst|$dst, $src}",
932 SSEPackedDouble>, TB, OpSize;
933 defm Int_CVTTPS2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttps2pi,
934 f64mem, load, "cvttps2pi\t{$src, $dst|$dst, $src}",
935 SSEPackedSingle>, TB;
936 defm Int_CVTTPD2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttpd2pi,
937 f128mem, memop, "cvttpd2pi\t{$src, $dst|$dst, $src}",
938 SSEPackedDouble>, TB, OpSize;
939 defm Int_CVTPI2PD : sse12_cvt_pint<0x2A, VR64, VR128, int_x86_sse_cvtpi2pd,
940 i64mem, load, "cvtpi2pd\t{$src, $dst|$dst, $src}",
941 SSEPackedDouble>, TB, OpSize;
942 let Constraints = "$src1 = $dst" in {
943 defm Int_CVTPI2PS : sse12_cvt_pint_3addr<0x2A, VR64, VR128,
944 int_x86_sse_cvtpi2ps,
945 i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
946 SSEPackedSingle>, TB;
947 }
951 // Aliases for intrinsics
952 let isAsmParserOnly = 1, Pattern = []<dag> in {
953 defm Int_VCVTTSS2SI : sse12_cvt_sint_3addr<0x2C, VR128, GR32,
954 int_x86_sse_cvttss2si, f32mem, load,
955 "cvttss2si\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS;
956 defm Int_VCVTTSD2SI : sse12_cvt_sint_3addr<0x2C, VR128, GR32,
957 int_x86_sse2_cvttsd2si, f128mem, load,
958 "cvttss2si\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD;
960 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
961 f32mem, load, "cvttss2si\t{$src, $dst|$dst, $src}">,
963 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
964 f128mem, load, "cvttss2si\t{$src, $dst|$dst, $src}">,
967 let isAsmParserOnly = 1, Pattern = []<dag> in {
968 defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
969 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
970 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, f128mem, load,
971 "cvtdq2ps\t{$src, $dst|$dst, $src}",
972 SSEPackedSingle>, TB, VEX;
973 }
974 let Pattern = []<dag> in {
975 defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
976 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
977 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, f128mem, load /*dummy*/,
978 "cvtdq2ps\t{$src, $dst|$dst, $src}",
979 SSEPackedSingle>, TB; /* PD SSE3 form is available */
980 }
984 // Convert scalar double to scalar single
985 let isAsmParserOnly = 1 in {
986 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
987 (ins FR64:$src1, FR64:$src2),
988 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
990 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
991 (ins FR64:$src1, f64mem:$src2),
992 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
993 []>, XD, Requires<[HasAVX, HasSSE2, OptForSize]>, VEX_4V;
994 }
995 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
996 "cvtsd2ss\t{$src, $dst|$dst, $src}",
997 [(set FR32:$dst, (fround FR64:$src))]>;
998 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
999 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1000 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
1001 Requires<[HasSSE2, OptForSize]>;
1003 let isAsmParserOnly = 1 in
1004 defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
1005 int_x86_sse2_cvtsd2ss, f64mem, load,
1006 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}">,
1008 let Constraints = "$src1 = $dst" in
1009 defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
1010 int_x86_sse2_cvtsd2ss, f64mem, load,
1011 "cvtsd2ss\t{$src2, $dst|$dst, $src2}">, XS;
1013 // Convert scalar single to scalar double
1014 let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
1015 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
1016 (ins FR32:$src1, FR32:$src2),
1017 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1018 []>, XS, Requires<[HasAVX, HasSSE2]>, VEX_4V;
1019 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
1020 (ins FR32:$src1, f32mem:$src2),
1021 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1022 []>, XS, VEX_4V, Requires<[HasAVX, HasSSE2, OptForSize]>;
1023 }
1024 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
1025 "cvtss2sd\t{$src, $dst|$dst, $src}",
1026 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
1027 Requires<[HasSSE2]>;
1028 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
1029 "cvtss2sd\t{$src, $dst|$dst, $src}",
1030 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
1031 Requires<[HasSSE2, OptForSize]>;
1033 let isAsmParserOnly = 1 in {
1034 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
1035 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1036 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1037 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1038 VR128:$src2))]>, XS, VEX_4V,
1039 Requires<[HasAVX, HasSSE2]>;
1040 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
1041 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
1042 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1043 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1044 (load addr:$src2)))]>, XS, VEX_4V,
1045 Requires<[HasAVX, HasSSE2]>;
1046 }
1047 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
1048 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
1049 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1050 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1051 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1052 VR128:$src2))]>, XS,
1053 Requires<[HasSSE2]>;
1054 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
1055 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
1056 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1057 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1058 (load addr:$src2)))]>, XS,
1059 Requires<[HasSSE2]>;
1060 }
1062 def : Pat<(extloadf32 addr:$src),
1063 (CVTSS2SDrr (MOVSSrm addr:$src))>,
1064 Requires<[HasSSE2, OptForSpeed]>;
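// That is, when optimizing for speed the f32->f64 extending load is split into
// a MOVSS load followed by a register-register CVTSS2SD, rather than using the
// folded CVTSS2SDrm form (which is only selected under OptForSize above).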
1066 // Convert doubleword to packed single/double fp
1067 let isAsmParserOnly = 1 in { // SSE2 instructions without OpSize prefix
1068 def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1069 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1070 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
1071 TB, VEX, Requires<[HasAVX, HasSSE2]>;
1072 def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1073 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1074 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
1075 (bitconvert (memopv2i64 addr:$src))))]>,
1076 TB, VEX, Requires<[HasAVX, HasSSE2]>;
1077 }
1078 def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1079 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1080 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
1081 TB, Requires<[HasSSE2]>;
1082 def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1083 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1084 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
1085 (bitconvert (memopv2i64 addr:$src))))]>,
1086 TB, Requires<[HasSSE2]>;
1088 // FIXME: why is the non-intrinsic version described as SSE3?
1089 let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
1090 def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1091 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
1092 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
1093 XS, VEX, Requires<[HasAVX, HasSSE2]>;
1094 def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1095 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
1096 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
1097 (bitconvert (memopv2i64 addr:$src))))]>,
1098 XS, VEX, Requires<[HasAVX, HasSSE2]>;
1099 }
1100 def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1101 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1102 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
1103 XS, Requires<[HasSSE2]>;
1104 def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1105 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1106 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
1107 (bitconvert (memopv2i64 addr:$src))))]>,
1108 XS, Requires<[HasSSE2]>;
1110 // Convert packed single/double fp to doubleword
1111 let isAsmParserOnly = 1 in {
1112 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1113 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1114 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1115 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1117 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1118 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
1119 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1120 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
1122 let isAsmParserOnly = 1 in {
1123 def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1124 "cvtps2dq\t{$src, $dst|$dst, $src}",
1125 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
1126 VEX;
1127 def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
1129 "cvtps2dq\t{$src, $dst|$dst, $src}",
1130 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
1131 (memop addr:$src)))]>, VEX;
1132 }
1133 def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1134 "cvtps2dq\t{$src, $dst|$dst, $src}",
1135 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
1136 def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1137 "cvtps2dq\t{$src, $dst|$dst, $src}",
1138 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
1139 (memop addr:$src)))]>;
1141 let isAsmParserOnly = 1 in { // SSE2 packed instructions with XD prefix
1142 def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1143 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
1144 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1145 XD, VEX, Requires<[HasAVX, HasSSE2]>;
1146 def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1147 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
1148 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
1149 (memop addr:$src)))]>,
1150 XD, VEX, Requires<[HasAVX, HasSSE2]>;
1151 }
1152 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1153 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1154 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1155 XD, Requires<[HasSSE2]>;
1156 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1157 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1158 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
1159 (memop addr:$src)))]>,
1160 XD, Requires<[HasSSE2]>;
1163 // Convert packed single/double fp to doubleword, with truncation
1164 let isAsmParserOnly = 1 in { // SSE2 packed instructions with XS prefix
1165 def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1166 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1167 def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1168 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1170 def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1171 "cvttps2dq\t{$src, $dst|$dst, $src}", []>;
1172 def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1173 "cvttps2dq\t{$src, $dst|$dst, $src}", []>;
1176 let isAsmParserOnly = 1 in {
1177 def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1178 "vcvttps2dq\t{$src, $dst|$dst, $src}",
1179 [(set VR128:$dst,
1180 (int_x86_sse2_cvttps2dq VR128:$src))]>,
1181 XS, VEX, Requires<[HasAVX, HasSSE2]>;
1182 def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1183 "vcvttps2dq\t{$src, $dst|$dst, $src}",
1184 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
1185 (memop addr:$src)))]>,
1186 XS, VEX, Requires<[HasAVX, HasSSE2]>;
1187 }
1188 def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1189 "cvttps2dq\t{$src, $dst|$dst, $src}",
1190 [(set VR128:$dst,
1191 (int_x86_sse2_cvttps2dq VR128:$src))]>,
1192 XS, Requires<[HasSSE2]>;
1193 def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1194 "cvttps2dq\t{$src, $dst|$dst, $src}",
1195 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
1196 (memop addr:$src)))]>,
1197 XS, Requires<[HasSSE2]>;
1199 let isAsmParserOnly = 1 in {
1200 def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
1202 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1203 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
1205 def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
1207 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1208 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1209 (memop addr:$src)))]>, VEX;
1210 }
1211 def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1212 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1213 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
1214 def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
1215 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1216 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1217 (memop addr:$src)))]>;
1219 // Convert packed single to packed double
1220 let isAsmParserOnly = 1 in { // SSE2 instructions without OpSize prefix
1221 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1222 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX,
1224 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1225 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX,
1228 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1229 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
1230 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1231 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
1233 let isAsmParserOnly = 1 in {
1234 def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1235 "cvtps2pd\t{$src, $dst|$dst, $src}",
1236 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1237 VEX, Requires<[HasAVX, HasSSE2]>;
1238 def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1239 "cvtps2pd\t{$src, $dst|$dst, $src}",
1240 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1241 (load addr:$src)))]>,
1242 VEX, Requires<[HasAVX, HasSSE2]>;
1243 }
1244 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1245 "cvtps2pd\t{$src, $dst|$dst, $src}",
1246 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1247 TB, Requires<[HasSSE2]>;
1248 def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1249 "cvtps2pd\t{$src, $dst|$dst, $src}",
1250 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1251 (load addr:$src)))]>,
1252 TB, Requires<[HasSSE2]>;
1254 // Convert packed double to packed single
1255 let isAsmParserOnly = 1 in {
1256 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1257 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1258 // FIXME: the memory form of this instruction should be described using
1259 // extra asm syntax.
1260 }
1261 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1262 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1263 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1264 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1267 let isAsmParserOnly = 1 in {
1268 def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1269 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1270 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1271 def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
1273 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1274 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1275 (memop addr:$src)))]>;
1276 }
1277 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1278 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1279 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1280 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1281 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1282 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1283 (memop addr:$src)))]>;
1285 //===----------------------------------------------------------------------===//
1286 // SSE 1 & 2 - Compare Instructions
1287 //===----------------------------------------------------------------------===//
1289 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
1290 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
1291 string asm, string asm_alt> {
1292 def rr : SIi8<0xC2, MRMSrcReg,
1293 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
1294 asm, []>;
1296 def rm : SIi8<0xC2, MRMSrcMem,
1297 (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
1298 asm, []>;
1299 // Accept explicit immediate argument form instead of comparison code.
1300 let isAsmParserOnly = 1 in {
1301 def rr_alt : SIi8<0xC2, MRMSrcReg,
1302 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1303 asm_alt, []>;
1305 def rm_alt : SIi8<0xC2, MRMSrcMem,
1306 (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
1307 asm_alt, []>;
1308 }
1309 }
1311 let neverHasSideEffects = 1, isAsmParserOnly = 1 in {
1312 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
1313 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1314 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1316 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
1317 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1318 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1322 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1323 defm CMPSS : sse12_cmp_scalar<FR32, f32mem,
1324 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
1325 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}">, XS;
1326 defm CMPSD : sse12_cmp_scalar<FR64, f64mem,
1327 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
1328 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}">, XD;
1331 multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
1332 Intrinsic Int, string asm> {
1333 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1334 (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
1335 [(set VR128:$dst, (Int VR128:$src1,
1336 VR128:$src, imm:$cc))]>;
1337 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1338 (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
1339 [(set VR128:$dst, (Int VR128:$src1,
1340 (load addr:$src), imm:$cc))]>;
1343 // Aliases to match intrinsics which expect XMM operand(s).
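// For example, int_x86_sse_cmp_ss takes and returns whole <4 x float> values,
// so these variants use VR128 operands even though only the low element is
// actually compared.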
1344 let isAsmParserOnly = 1 in {
1345 defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1346 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
1348 defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1349 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
1352 let Constraints = "$src1 = $dst" in {
1353 defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1354 "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
1355 defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1356 "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
1360 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
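// Note: per the ISA, ucomis[sd] signals #IA only for SNaN operands, while
// comis[sd] also signals for QNaN; the EFLAGS results are otherwise identical.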
1361 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1362 ValueType vt, X86MemOperand x86memop,
1363 PatFrag ld_frag, string OpcodeStr, Domain d> {
1364 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1365 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1366 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
1367 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1368 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1369 [(set EFLAGS, (OpNode (vt RC:$src1),
1370 (ld_frag addr:$src2)))], d>;
1373 let Defs = [EFLAGS] in {
1374 let isAsmParserOnly = 1 in {
1375 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1376 "ucomiss", SSEPackedSingle>, VEX;
1377 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1378 "ucomisd", SSEPackedDouble>, OpSize, VEX;
1379 let Pattern = []<dag> in {
1380 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1381 "comiss", SSEPackedSingle>, VEX;
1382 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1383 "comisd", SSEPackedDouble>, OpSize, VEX;
1386 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1387 load, "ucomiss", SSEPackedSingle>, VEX;
1388 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1389 load, "ucomisd", SSEPackedDouble>, OpSize, VEX;
1391 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
1392 load, "comiss", SSEPackedSingle>, VEX;
1393 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
1394 load, "comisd", SSEPackedDouble>, OpSize, VEX;
1396 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1397 "ucomiss", SSEPackedSingle>, TB;
1398 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1399 "ucomisd", SSEPackedDouble>, TB, OpSize;
1401 let Pattern = []<dag> in {
1402 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1403 "comiss", SSEPackedSingle>, TB;
1404 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1405 "comisd", SSEPackedDouble>, TB, OpSize;
1408 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1409 load, "ucomiss", SSEPackedSingle>, TB;
1410 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1411 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
1413 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
1414 "comiss", SSEPackedSingle>, TB;
1415 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
1416 "comisd", SSEPackedDouble>, TB, OpSize;
1417 } // Defs = [EFLAGS]
1419 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
1420 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1421 Intrinsic Int, string asm, string asm_alt,
1423 def rri : PIi8<0xC2, MRMSrcReg,
1424 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1425 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1426 def rmi : PIi8<0xC2, MRMSrcMem,
1427 (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
1428 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1429 // Accept explicit immediate argument form instead of comparison code.
1430 let isAsmParserOnly = 1 in {
1431 def rri_alt : PIi8<0xC2, MRMSrcReg,
1432 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1434 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1435 (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
1440 let isAsmParserOnly = 1 in {
1441 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1442 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1443 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1444 SSEPackedSingle>, VEX_4V;
1445 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1446 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1447 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1448 SSEPackedDouble>, OpSize, VEX_4V;
1450 let Constraints = "$src1 = $dst" in {
1451 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1452 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1453 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1454 SSEPackedSingle>, TB;
1455 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1456 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1457 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1458 SSEPackedDouble>, TB, OpSize;
1461 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1462 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1463 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1464 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1465 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1466 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1467 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1468 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1470 //===----------------------------------------------------------------------===//
1471 // SSE 1 & 2 - Shuffle Instructions
1472 //===----------------------------------------------------------------------===//
1474 /// sse12_shuffle - sse 1 & 2 shuffle instructions
1475 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1476 ValueType vt, string asm, PatFrag mem_frag,
1477 Domain d, bit IsConvertibleToThreeAddress = 0> {
1478 def rmi : PIi8<0xC6, MRMSrcMem, (outs VR128:$dst),
1479 (ins VR128:$src1, f128mem:$src2, i8imm:$src3), asm,
1480 [(set VR128:$dst, (vt (shufp:$src3
1481 VR128:$src1, (mem_frag addr:$src2))))], d>;
1482 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1483 def rri : PIi8<0xC6, MRMSrcReg, (outs VR128:$dst),
1484 (ins VR128:$src1, VR128:$src2, i8imm:$src3), asm,
1486 (vt (shufp:$src3 VR128:$src1, VR128:$src2)))], d>;
1489 let isAsmParserOnly = 1 in {
1490 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1491 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1492 memopv4f32, SSEPackedSingle>, VEX_4V;
1493 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1494 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1495 memopv2f64, SSEPackedDouble>, OpSize, VEX_4V;
1498 let Constraints = "$src1 = $dst" in {
1499 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1500 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1501 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1503 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1504 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1505 memopv2f64, SSEPackedDouble>, TB, OpSize;
1508 //===----------------------------------------------------------------------===//
1509 // SSE 1 & 2 - Unpack Instructions
1510 //===----------------------------------------------------------------------===//
1512 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1513 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1514 PatFrag mem_frag, RegisterClass RC,
1515 X86MemOperand x86memop, string asm,
1517 def rr : PI<opc, MRMSrcReg,
1518 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1520 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1521 def rm : PI<opc, MRMSrcMem,
1522 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1524 (vt (OpNode RC:$src1,
1525 (mem_frag addr:$src2))))], d>;
1528 let AddedComplexity = 10 in {
1529 let isAsmParserOnly = 1 in {
1530 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1531 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1532 SSEPackedSingle>, VEX_4V;
1533 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1534 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1535 SSEPackedDouble>, OpSize, VEX_4V;
1536 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1537 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1538 SSEPackedSingle>, VEX_4V;
1539 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1540 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1541 SSEPackedDouble>, OpSize, VEX_4V;
1544 let Constraints = "$src1 = $dst" in {
1545 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1546 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1547 SSEPackedSingle>, TB;
1548 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1549 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1550 SSEPackedDouble>, TB, OpSize;
1551 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1552 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1553 SSEPackedSingle>, TB;
1554 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1555 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1556 SSEPackedDouble>, TB, OpSize;
1557 } // Constraints = "$src1 = $dst"
1558 } // AddedComplexity
1560 //===----------------------------------------------------------------------===//
1561 // SSE 1 & 2 - Extract Floating-Point Sign mask
1562 //===----------------------------------------------------------------------===//
1564 /// sse12_extr_sign_mask - sse 1 & 2 packed FP sign mask extraction
1565 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1567 def rr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1568 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1569 [(set GR32:$dst, (Int RC:$src))], d>;
1573 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1574 SSEPackedSingle>, TB;
1575 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1576 SSEPackedDouble>, TB, OpSize;
1578 let isAsmParserOnly = 1 in {
1579 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1580 "movmskps", SSEPackedSingle>, VEX;
1581 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1582 "movmskpd", SSEPackedDouble>, OpSize,
1586 //===----------------------------------------------------------------------===//
1587 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1588 //===----------------------------------------------------------------------===//
1590 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1591 // names that start with 'Fs'.
1593 // Alias instructions that map fld0 to pxor for sse.
1594 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1595 canFoldAsLoad = 1 in {
1596 // FIXME: Set encoding to pseudo!
1597 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1598 [(set FR32:$dst, fp32imm0)]>,
1599 Requires<[HasSSE1]>, TB, OpSize;
1600 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1601 [(set FR64:$dst, fpimm0)]>,
1602 Requires<[HasSSE2]>, TB, OpSize;
1605 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
1606 // bits are disregarded.
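// Note: movaps is one byte shorter than movss/movsd and, unlike them, does
// not merge into (and therefore depend on) the previous destination value.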
1607 let neverHasSideEffects = 1 in {
1608 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1609 "movaps\t{$src, $dst|$dst, $src}", []>;
1610 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1611 "movapd\t{$src, $dst|$dst, $src}", []>;
1614 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1615 // bits are disregarded.
1616 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1617 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1618 "movaps\t{$src, $dst|$dst, $src}",
1619 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
1620 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1621 "movapd\t{$src, $dst|$dst, $src}",
1622 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1625 //===----------------------------------------------------------------------===//
1626 // SSE 1 & 2 - Logical Instructions
1627 //===----------------------------------------------------------------------===//
1629 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
1631 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
1632 SDNode OpNode, bit MayLoad = 0> {
1633 let isAsmParserOnly = 1 in {
1634 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1635 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode, FR32,
1636 f32, f128mem, memopfsf32, SSEPackedSingle, MayLoad>, VEX_4V;
1638 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1639 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode, FR64,
1640 f64, f128mem, memopfsf64, SSEPackedDouble, MayLoad>, OpSize,
1644 let Constraints = "$src1 = $dst" in {
1645 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1646 "ps\t{$src2, $dst|$dst, $src2}"), OpNode, FR32, f32,
1647 f128mem, memopfsf32, SSEPackedSingle, MayLoad>, TB;
1649 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1650 "pd\t{$src2, $dst|$dst, $src2}"), OpNode, FR64, f64,
1651 f128mem, memopfsf64, SSEPackedDouble, MayLoad>, TB, OpSize;
1655 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1656 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
1657 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
1658 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
1660 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
1661 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef, 1>;
1663 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
1665 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
1666 SDNode OpNode, int HasPat = 0,
1667 list<list<dag>> Pattern = []> {
1668 let isAsmParserOnly = 1 in {
1669 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1670 !strconcat(OpcodeStr, "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1672 !if(HasPat, Pattern[0], // rr
1673 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1675 !if(HasPat, Pattern[2], // rm
1676 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1677 (memopv2i64 addr:$src2)))])>,
1680 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1681 !strconcat(OpcodeStr, "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1683 !if(HasPat, Pattern[1], // rr
1684 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1687 !if(HasPat, Pattern[3], // rm
1688 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1689 (memopv2i64 addr:$src2)))])>,
1692 let Constraints = "$src1 = $dst" in {
1693 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1694 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"), f128mem,
1695 !if(HasPat, Pattern[0], // rr
1696 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1698 !if(HasPat, Pattern[2], // rm
1699 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1700 (memopv2i64 addr:$src2)))])>, TB;
1702 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1703 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"), f128mem,
1704 !if(HasPat, Pattern[1], // rr
1705 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1708 !if(HasPat, Pattern[3], // rm
1709 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1710 (memopv2i64 addr:$src2)))])>,
1715 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
1716 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
1717 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
1718 let isCommutable = 0 in
1719 defm ANDN : sse12_fp_packed_logical<0x55, "andn", undef /* dummy */, 1, [
1721 [(set VR128:$dst, (v2i64 (and (xor VR128:$src1,
1722 (bc_v2i64 (v4i32 immAllOnesV))),
1725 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1726 (bc_v2i64 (v2f64 VR128:$src2))))],
1728 [(set VR128:$dst, (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
1729 (bc_v2i64 (v4i32 immAllOnesV))),
1730 (memopv2i64 addr:$src2))))],
1732 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1733 (memopv2i64 addr:$src2)))]]>;
1735 //===----------------------------------------------------------------------===//
1736 // SSE 1 & 2 - Arithmetic Instructions
1737 //===----------------------------------------------------------------------===//
1739 /// basic_sse12_fp_binop_rm - SSE 1 & 2 binops come in both scalar and
1740 /// vector forms.
1742 /// In addition, we also have a special variant of the scalar form here to
1743 /// represent the associated intrinsic operation. This form is unlike the
1744 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1745 /// and leaves the top elements unmodified (therefore these cannot be commuted).
1747 /// These three forms can each be reg+reg or reg+mem.
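/// For instance, "defm ADD : basic_sse12_fp_binop_rm<0x58, "add", fadd>;"
/// below produces scalar (ADDSS*/ADDSD*), packed (ADDPS*/ADDPD*) and
/// intrinsic-form records, plus VEX-encoded V* variants (illustrative
/// summary; the exact record names come from the sub-multiclasses).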
1749 multiclass basic_sse12_fp_binop_rm<bits<8> opc, string OpcodeStr,
1752 let isAsmParserOnly = 1 in {
1753 defm V#NAME#SS : sse12_fp_scalar<opc,
1754 !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1755 OpNode, FR32, f32mem>, XS, VEX_4V;
1757 defm V#NAME#SD : sse12_fp_scalar<opc,
1758 !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1759 OpNode, FR64, f64mem>, XD, VEX_4V;
1761 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1762 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode,
1763 VR128, v4f32, f128mem, memopv4f32, SSEPackedSingle>,
1766 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1767 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode,
1768 VR128, v2f64, f128mem, memopv2f64, SSEPackedDouble>,
1771 defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1772 !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1773 "", "_ss", ssmem, sse_load_f32>, XS, VEX_4V;
1775 defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1776 !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1777 "2", "_sd", sdmem, sse_load_f64>, XD, VEX_4V;
1780 let Constraints = "$src1 = $dst" in {
1781 defm SS : sse12_fp_scalar<opc,
1782 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
1783 OpNode, FR32, f32mem>, XS;
1785 defm SD : sse12_fp_scalar<opc,
1786 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1787 OpNode, FR64, f64mem>, XD;
1789 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1790 "ps\t{$src2, $dst|$dst, $src2}"), OpNode, VR128, v4f32,
1791 f128mem, memopv4f32, SSEPackedSingle>, TB;
1793 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1794 "pd\t{$src2, $dst|$dst, $src2}"), OpNode, VR128, v2f64,
1795 f128mem, memopv2f64, SSEPackedDouble>, TB, OpSize;
1797 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1798 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
1799 "", "_ss", ssmem, sse_load_f32>, XS;
1801 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1802 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1803 "2", "_sd", sdmem, sse_load_f64>, XD;
1807 // Arithmetic instructions
1808 defm ADD : basic_sse12_fp_binop_rm<0x58, "add", fadd>;
1809 defm MUL : basic_sse12_fp_binop_rm<0x59, "mul", fmul>;
1811 let isCommutable = 0 in {
1812 defm SUB : basic_sse12_fp_binop_rm<0x5C, "sub", fsub>;
1813 defm DIV : basic_sse12_fp_binop_rm<0x5E, "div", fdiv>;
1816 /// sse12_fp_binop_rm - Other SSE 1 & 2 binops
1818 /// This multiclass is like basic_sse12_fp_binop_rm, with the addition of
1819 /// instructions for a full-vector intrinsic form. Operations that map
1820 /// onto C operators don't use this form since they just use the plain
1821 /// vector form instead of having a separate vector intrinsic form.
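/// For example, MAX and MIN below use this multiclass so that calls to
/// int_x86_sse_max_ps / int_x86_sse2_max_pd map directly onto MAXPS/MAXPD,
/// whereas ADD/MUL above are matched through the plain fadd/fmul patterns.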
1823 multiclass sse12_fp_binop_rm<bits<8> opc, string OpcodeStr,
1826 let isAsmParserOnly = 1 in {
1827 // Scalar operation, reg+reg.
1828 defm V#NAME#SS : sse12_fp_scalar<opc,
1829 !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1830 OpNode, FR32, f32mem>, XS, VEX_4V;
1832 defm V#NAME#SD : sse12_fp_scalar<opc,
1833 !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1834 OpNode, FR64, f64mem>, XD, VEX_4V;
1836 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1837 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode,
1838 VR128, v4f32, f128mem, memopv4f32, SSEPackedSingle>,
1841 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1842 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode,
1843 VR128, v2f64, f128mem, memopv2f64, SSEPackedDouble>,
1846 defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1847 !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1848 "", "_ss", ssmem, sse_load_f32>, XS, VEX_4V;
1850 defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1851 !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1852 "2", "_sd", sdmem, sse_load_f64>, XD, VEX_4V;
1854 defm V#NAME#PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1855 !strconcat(OpcodeStr, "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1856 "", "_ps", f128mem, memopv4f32, SSEPackedSingle>, VEX_4V;
1858 defm V#NAME#PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1859 !strconcat(OpcodeStr, "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1860 "2", "_pd", f128mem, memopv2f64, SSEPackedDouble>, OpSize,
1864 let Constraints = "$src1 = $dst" in {
1865 // Scalar operation, reg+reg.
1866 defm SS : sse12_fp_scalar<opc,
1867 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
1868 OpNode, FR32, f32mem>, XS;
1869 defm SD : sse12_fp_scalar<opc,
1870 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1871 OpNode, FR64, f64mem>, XD;
1872 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1873 "ps\t{$src2, $dst|$dst, $src2}"), OpNode, VR128, v4f32,
1874 f128mem, memopv4f32, SSEPackedSingle>, TB;
1876 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
1877 "pd\t{$src2, $dst|$dst, $src2}"), OpNode, VR128, v2f64,
1878 f128mem, memopv2f64, SSEPackedDouble>, TB, OpSize;
1880 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1881 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
1882 "", "_ss", ssmem, sse_load_f32>, XS;
1884 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1885 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1886 "2", "_sd", sdmem, sse_load_f64>, XD;
1888 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1889 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
1890 "", "_ps", f128mem, memopv4f32, SSEPackedSingle>, TB;
1892 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1893 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
1894 "2", "_pd", f128mem, memopv2f64, SSEPackedDouble>, TB, OpSize;
1898 let isCommutable = 0 in {
1899 defm MAX : sse12_fp_binop_rm<0x5F, "max", X86fmax>;
1900 defm MIN : sse12_fp_binop_rm<0x5D, "min", X86fmin>;
1904 /// In addition, we also have a special variant of the scalar form here to
1905 /// represent the associated intrinsic operation. This form is unlike the
1906 /// plain scalar form, in that it takes an entire vector (instead of a
1907 /// scalar) and leaves the top elements undefined.
1909 /// And, we have a special variant form for a full-vector intrinsic form.
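/// E.g. the register form of sqrtss writes only element 0 of its destination
/// and leaves elements 1-3 unchanged, which is why the intrinsic variants
/// operate on whole VR128 values rather than FR32.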
1911 /// sse1_fp_unop_s - SSE1 unops in scalar form.
1912 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
1913 SDNode OpNode, Intrinsic F32Int> {
1914 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1915 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1916 [(set FR32:$dst, (OpNode FR32:$src))]>;
1917 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
1918 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1919 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
1920 Requires<[HasSSE1, OptForSize]>;
1921 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1922 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1923 [(set VR128:$dst, (F32Int VR128:$src))]>;
1924 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1925 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1926 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1929 /// sse1_fp_unop_p - SSE1 unops in vector forms.
1930 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr,
1931 SDNode OpNode, Intrinsic V4F32Int> {
1932 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1933 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1934 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
1935 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1936 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1937 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
1938 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1939 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1940 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
1941 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1942 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1943 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
1946 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
1947 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1948 SDNode OpNode, Intrinsic F32Int> {
1949 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
1950 !strconcat(!strconcat("v", OpcodeStr),
1951 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1952 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
1953 !strconcat(!strconcat("v", OpcodeStr),
1954 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1955 []>, XS, Requires<[HasAVX, HasSSE1, OptForSize]>;
1956 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
1957 (ins VR128:$src1, VR128:$src2),
1958 !strconcat(!strconcat("v", OpcodeStr),
1959 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1960 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
1961 (ins VR128:$src1, ssmem:$src2),
1962 !strconcat(!strconcat("v", OpcodeStr),
1963 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1966 /// sse2_fp_unop_s - SSE2 unops in scalar form.
1967 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
1968 SDNode OpNode, Intrinsic F64Int> {
1969 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1970 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1971 [(set FR64:$dst, (OpNode FR64:$src))]>;
1972 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1973 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1974 [(set FR64:$dst, (OpNode (load addr:$src)))]>;
1975 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1976 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1977 [(set VR128:$dst, (F64Int VR128:$src))]>;
1978 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1979 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1980 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1983 /// sse2_fp_unop_p - SSE2 unops in vector forms.
1984 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
1985 SDNode OpNode, Intrinsic V2F64Int> {
1986 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1987 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1988 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
1989 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1990 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1991 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
1992 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1993 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1994 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
1995 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1996 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1997 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
2000 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
2001 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
2002 SDNode OpNode, Intrinsic F64Int> {
2003 def SDr : VSDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
2004 !strconcat(OpcodeStr,
2005 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2006 def SDm : VSDI<opc, MRMSrcMem, (outs FR64:$dst),
2007 (ins FR64:$src1, f64mem:$src2),
2008 !strconcat(OpcodeStr,
2009 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2010 def SDr_Int : VSDI<opc, MRMSrcReg, (outs VR128:$dst),
2011 (ins VR128:$src1, VR128:$src2),
2012 !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2014 def SDm_Int : VSDI<opc, MRMSrcMem, (outs VR128:$dst),
2015 (ins VR128:$src1, sdmem:$src2),
2016 !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2020 let isAsmParserOnly = 1 in {
2022 let Predicates = [HasAVX, HasSSE2] in {
2023 defm VSQRT : sse2_fp_unop_s_avx<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
2026 defm VSQRT : sse2_fp_unop_p<0x51, "vsqrt", fsqrt, int_x86_sse2_sqrt_pd>, VEX;
2029 let Predicates = [HasAVX, HasSSE1] in {
2030 defm VSQRT : sse1_fp_unop_s_avx<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
2032 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt, int_x86_sse_sqrt_ps>, VEX;
2033 // Reciprocal approximations. Note that these typically require refinement
2034 // in order to obtain suitable precision.
2035 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "rsqrt", X86frsqrt,
2036 int_x86_sse_rsqrt_ss>, VEX_4V;
2037 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt, int_x86_sse_rsqrt_ps>,
2039 defm VRCP : sse1_fp_unop_s_avx<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
2041 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp, int_x86_sse_rcp_ps>,
2047 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
2048 sse1_fp_unop_p<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ps>,
2049 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
2050 sse2_fp_unop_p<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_pd>;
2052 // Reciprocal approximations. Note that these typically require refinement
2053 // in order to obtain suitable precision.
2054 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
2055 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ps>;
2056 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
2057 sse1_fp_unop_p<0x53, "rcp", X86frcp, int_x86_sse_rcp_ps>;
2059 // There is no f64 version of the reciprocal approximation instructions.
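// A typical refinement step (performed by the compiler or user code, not
// modeled here) is one Newton-Raphson iteration: for rcpps, x1 = x0*(2 - a*x0);
// for rsqrtps, x1 = 0.5*x0*(3 - a*x0*x0), where x0 is the ~12-bit estimate.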
2061 //===----------------------------------------------------------------------===//
2062 // SSE 1 & 2 - Non-temporal stores
2063 //===----------------------------------------------------------------------===//
2065 let isAsmParserOnly = 1 in {
2066 def VMOVNTPSmr_Int : VPSI<0x2B, MRMDestMem, (outs),
2067 (ins i128mem:$dst, VR128:$src),
2068 "movntps\t{$src, $dst|$dst, $src}",
2069 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>, VEX;
2070 def VMOVNTPDmr_Int : VPDI<0x2B, MRMDestMem, (outs),
2071 (ins i128mem:$dst, VR128:$src),
2072 "movntpd\t{$src, $dst|$dst, $src}",
2073 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>, VEX;
2075 let ExeDomain = SSEPackedInt in
2076 def VMOVNTDQmr_Int : VPDI<0xE7, MRMDestMem, (outs),
2077 (ins f128mem:$dst, VR128:$src),
2078 "movntdq\t{$src, $dst|$dst, $src}",
2079 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>, VEX;
2081 let AddedComplexity = 400 in { // Prefer non-temporal versions
2082 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
2083 (ins f128mem:$dst, VR128:$src),
2084 "movntps\t{$src, $dst|$dst, $src}",
2085 [(alignednontemporalstore (v4f32 VR128:$src),
2087 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
2088 (ins f128mem:$dst, VR128:$src),
2089 "movntpd\t{$src, $dst|$dst, $src}",
2090 [(alignednontemporalstore (v2f64 VR128:$src),
2092 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
2093 (ins f128mem:$dst, VR128:$src),
2094 "movntdq\t{$src, $dst|$dst, $src}",
2095 [(alignednontemporalstore (v2f64 VR128:$src),
2097 let ExeDomain = SSEPackedInt in
2098 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
2099 (ins f128mem:$dst, VR128:$src),
2100 "movntdq\t{$src, $dst|$dst, $src}",
2101 [(alignednontemporalstore (v4f32 VR128:$src),
2106 def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2107 "movntps\t{$src, $dst|$dst, $src}",
2108 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
2109 def MOVNTPDmr_Int : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2110 "movntpd\t{$src, $dst|$dst, $src}",
2111 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
2113 let ExeDomain = SSEPackedInt in
2114 def MOVNTDQmr_Int : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2115 "movntdq\t{$src, $dst|$dst, $src}",
2116 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
2118 let AddedComplexity = 400 in { // Prefer non-temporal versions
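// The large AddedComplexity value biases instruction selection toward these
// nontemporal-store patterns whenever they and an ordinary store both match.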
2119 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2120 "movntps\t{$src, $dst|$dst, $src}",
2121 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2122 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2123 "movntpd\t{$src, $dst|$dst, $src}",
2124 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2126 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2127 "movntdq\t{$src, $dst|$dst, $src}",
2128 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2130 let ExeDomain = SSEPackedInt in
2131 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2132 "movntdq\t{$src, $dst|$dst, $src}",
2133 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2135 // There is no AVX form for instructions below this point
2136 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2137 "movnti\t{$src, $dst|$dst, $src}",
2138 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2139 TB, Requires<[HasSSE2]>;
2141 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2142 "movnti\t{$src, $dst|$dst, $src}",
2143 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2144 TB, Requires<[HasSSE2]>;
2147 def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2148 "movnti\t{$src, $dst|$dst, $src}",
2149 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
2150 TB, Requires<[HasSSE2]>;
2152 //===----------------------------------------------------------------------===//
2153 // SSE 1 & 2 - Misc Instructions (No AVX form)
2154 //===----------------------------------------------------------------------===//
2156 // Prefetch intrinsic.
2157 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2158 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
2159 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2160 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
2161 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2162 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
2163 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2164 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
2166 // Load, store, and memory fence
2167 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2168 TB, Requires<[HasSSE1]>;
2170 // Alias instructions that map zero vector to pxor / xorp* for sse.
2171 // We set canFoldAsLoad because this can be converted to a constant-pool
2172 // load of an all-zeros value if folding it would be beneficial.
2173 // FIXME: Change encoding to pseudo!
2174 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2175 isCodeGenOnly = 1 in {
2176 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2177 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
2178 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2179 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
2180 let ExeDomain = SSEPackedInt in
2181 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2182 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2185 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
2186 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
2187 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
2189 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2190 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
2192 //===----------------------------------------------------------------------===//
2193 // SSE 1 & 2 - Load/Store XCSR register
2194 //===----------------------------------------------------------------------===//
2196 let isAsmParserOnly = 1 in {
2197 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2198 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2199 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2200 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2203 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2204 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2205 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2206 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2208 //===---------------------------------------------------------------------===//
2209 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2210 //===---------------------------------------------------------------------===//
2211 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2213 let isAsmParserOnly = 1 in {
2214 let neverHasSideEffects = 1 in
2215 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2216 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2217 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2218 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2220 let canFoldAsLoad = 1, mayLoad = 1 in {
2221 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2222 "movdqa\t{$src, $dst|$dst, $src}",
2223 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>,
2225 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2226 "vmovdqu\t{$src, $dst|$dst, $src}",
2227 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2228 XS, VEX, Requires<[HasAVX, HasSSE2]>;
2231 let mayStore = 1 in {
2232 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2233 (ins i128mem:$dst, VR128:$src),
2234 "movdqa\t{$src, $dst|$dst, $src}",
2235 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>, VEX;
2236 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2237 "vmovdqu\t{$src, $dst|$dst, $src}",
2238 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2239 XS, VEX, Requires<[HasAVX, HasSSE2]>;
2243 let neverHasSideEffects = 1 in
2244 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2245 "movdqa\t{$src, $dst|$dst, $src}", []>;
2247 let canFoldAsLoad = 1, mayLoad = 1 in {
2248 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2249 "movdqa\t{$src, $dst|$dst, $src}",
2250 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2251 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2252 "movdqu\t{$src, $dst|$dst, $src}",
2253 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2254 XS, Requires<[HasSSE2]>;
2257 let mayStore = 1 in {
2258 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2259 "movdqa\t{$src, $dst|$dst, $src}",
2260 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2261 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2262 "movdqu\t{$src, $dst|$dst, $src}",
2263 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2264 XS, Requires<[HasSSE2]>;
2267 // Intrinsic forms of MOVDQU load and store
2268 let isAsmParserOnly = 1 in {
2269 let canFoldAsLoad = 1 in
2270 def VMOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2271 "vmovdqu\t{$src, $dst|$dst, $src}",
2272 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2273 XS, VEX, Requires<[HasAVX, HasSSE2]>;
2274 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2275 "vmovdqu\t{$src, $dst|$dst, $src}",
2276 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2277 XS, VEX, Requires<[HasAVX, HasSSE2]>;
2280 let canFoldAsLoad = 1 in
2281 def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2282 "movdqu\t{$src, $dst|$dst, $src}",
2283 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2284 XS, Requires<[HasSSE2]>;
2285 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2286 "movdqu\t{$src, $dst|$dst, $src}",
2287 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2288 XS, Requires<[HasSSE2]>;
2290 } // ExeDomain = SSEPackedInt
2292 //===---------------------------------------------------------------------===//
2293 // SSE2 - Packed Integer Arithmetic Instructions
2294 //===---------------------------------------------------------------------===//
2296 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2298 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
2299 bit IsCommutable = 0, bit Is2Addr = 1> {
2300 let isCommutable = IsCommutable in
2301 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2302 (ins VR128:$src1, VR128:$src2),
2304 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2305 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2306 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2307 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2308 (ins VR128:$src1, i128mem:$src2),
2310 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2311 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2312 [(set VR128:$dst, (IntId VR128:$src1,
2313 (bitconvert (memopv2i64 addr:$src2))))]>;
2316 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
2317 string OpcodeStr, Intrinsic IntId,
2318 Intrinsic IntId2, bit Is2Addr = 1> {
2319 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2320 (ins VR128:$src1, VR128:$src2),
2322 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2323 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2324 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2325 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2326 (ins VR128:$src1, i128mem:$src2),
2328 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2329 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2330 [(set VR128:$dst, (IntId VR128:$src1,
2331 (bitconvert (memopv2i64 addr:$src2))))]>;
2332 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
2333 (ins VR128:$src1, i32i8imm:$src2),
2335 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2336 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2337 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
2340 /// PDI_binop_rm - Simple SSE2 binary operator.
2341 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2342 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
2343 let isCommutable = IsCommutable in
2344 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2345 (ins VR128:$src1, VR128:$src2),
2347 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2348 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2349 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
2350 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2351 (ins VR128:$src1, i128mem:$src2),
2353 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2354 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2355 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
2356 (bitconvert (memopv2i64 addr:$src2)))))]>;
2359 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
2361 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
2362 /// to collapse (bitconvert VT to VT) into its operand.
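/// I.e. a v2i64 PDI_binop_rm memory pattern would contain a redundant
/// v2i64-to-v2i64 bitconvert around the memopv2i64 operand that tblgen does
/// not currently fold away, so this multiclass writes the pattern without it.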
2364 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
2365 bit IsCommutable = 0, bit Is2Addr = 1> {
2366 let isCommutable = IsCommutable in
2367 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2368 (ins VR128:$src1, VR128:$src2),
2370 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2371 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2372 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
2373 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2374 (ins VR128:$src1, i128mem:$src2),
2376 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2377 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2378 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
2381 } // ExeDomain = SSEPackedInt
2383 // 128-bit Integer Arithmetic
2385 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2386 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
2387 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
2388 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
2389 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
2390 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
2391 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
2392 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
2393 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
2394 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
2397 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
2399 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
2401 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
2403 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
2405 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
2407 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
2409 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
2411 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
2413 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
2415 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
2417 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
2419 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
2421 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
2423 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
2425 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
2427 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
2429 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
2431 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
2433 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
2437 let Constraints = "$src1 = $dst" in {
2438 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
2439 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
2440 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
2441 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
2442 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
2443 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
2444 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
2445 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
2446 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
2449 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
2450 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
2451 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
2452 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
2453 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
2454 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
2455 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
2456 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
2457 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
2458 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
2459 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
2460 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
2461 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
2462 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
2463 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
2464 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
2465 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
2466 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
2467 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
2469 } // Constraints = "$src1 = $dst"
2471 //===---------------------------------------------------------------------===//
2472 // SSE2 - Packed Integer Logical Instructions
2473 //===---------------------------------------------------------------------===//
2475 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2476 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
2477 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
2479 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
2480 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
2482 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
2483 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
2486 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
2487 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
2489 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
2490 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
2492 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
2493 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
2496 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
2497 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
2499 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
2500 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
2503 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
2504 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
2505 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
2507 let ExeDomain = SSEPackedInt in {
2508 let neverHasSideEffects = 1 in {
2509 // 128-bit logical shifts.
2510 def VPSLLDQri : PDIi8<0x73, MRM7r,
2511 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2512 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2514 def VPSRLDQri : PDIi8<0x73, MRM3r,
2515 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2516 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2518 // PSRADQri doesn't exist in SSE[1-3].
2520 def VPANDNrr : PDI<0xDF, MRMSrcReg,
2521 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2522 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2523 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2524 VR128:$src2)))]>, VEX_4V;
2526 def VPANDNrm : PDI<0xDF, MRMSrcMem,
2527 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2528 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2529 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2530 (memopv2i64 addr:$src2))))]>,
2535 let Constraints = "$src1 = $dst" in {
2536 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
2537 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
2538 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
2539 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
2540 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
2541 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
2543 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
2544 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
2545 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
2546 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
2547 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
2548 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
2550 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
2551 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
2552 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
2553 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
2555 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
2556 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
2557 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
2559 let ExeDomain = SSEPackedInt in {
2560 let neverHasSideEffects = 1 in {
2561 // 128-bit logical shifts.
2562 def PSLLDQri : PDIi8<0x73, MRM7r,
2563 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2564 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
2565 def PSRLDQri : PDIi8<0x73, MRM3r,
2566 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2567 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
2568 // PSRADQri doesn't exist in SSE[1-3].
2570 def PANDNrr : PDI<0xDF, MRMSrcReg,
2571 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2572 "pandn\t{$src2, $dst|$dst, $src2}",
2573 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2576 def PANDNrm : PDI<0xDF, MRMSrcMem,
2577 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2578 "pandn\t{$src2, $dst|$dst, $src2}",
2579 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2580 (memopv2i64 addr:$src2))))]>;
2582 } // Constraints = "$src1 = $dst"
2584 let Predicates = [HasSSE2] in {
2585 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2586 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2587 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2588 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2589 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2590 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
2591 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2592 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
2593 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2594 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2596 // Shift up / down and insert zeros.
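// BYTE_imm rescales the shift amount from bits to bytes (imm >> 3), since
// pslldq/psrldq shift the full 128-bit value by a byte count.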
2597 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2598 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2599 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2600 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2603 //===---------------------------------------------------------------------===//
2604 // SSE2 - Packed Integer Comparison Instructions
2605 //===---------------------------------------------------------------------===//
2607 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2608 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
2610 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
2612 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
2614 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
2616 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
2618 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
2622 let Constraints = "$src1 = $dst" in {
2623 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
2624 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
2625 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
2626 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
2627 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
2628 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
2629 } // Constraints = "$src1 = $dst"
2631 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2632 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
2633 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2634 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2635 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2636 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2637 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2638 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2639 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2640 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2641 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2642 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2644 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2645 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2646 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2647 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2648 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2649 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2650 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2651 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2652 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2653 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2654 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2655 (PCMPGTDrm VR128:$src1, addr:$src2)>;
2657 //===---------------------------------------------------------------------===//
2658 // SSE2 - Packed Integer Pack Instructions
2659 //===---------------------------------------------------------------------===//
2661 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2662 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
2664 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
2666 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
2670 let Constraints = "$src1 = $dst" in {
2671 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2672 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2673 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2674 } // Constraints = "$src1 = $dst"
2676 //===---------------------------------------------------------------------===//
2677 // SSE2 - Packed Integer Shuffle Instructions
2678 //===---------------------------------------------------------------------===//
2680 let ExeDomain = SSEPackedInt in {
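/// sse2_pshuffle - PSHUFD / PSHUFHW / PSHUFLW, driven by an 8-bit immediate:
/// 'ri' shuffles a register, 'mi' folds a 128-bit load (bitcast to the element
/// type via bc_frag) into the shuffle.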
2681 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
2683 def ri : Ii8<0x70, MRMSrcReg,
2684 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2685 !strconcat(OpcodeStr,
2686 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2687 [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
2689 def mi : Ii8<0x70, MRMSrcMem,
2690 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2691 !strconcat(OpcodeStr,
2692 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2693 [(set VR128:$dst, (vt (pshuf_frag:$src2
2694 (bc_frag (memopv2i64 addr:$src1)),
2697 } // ExeDomain = SSEPackedInt
2699 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2700 let AddedComplexity = 5 in
2701 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
2704 // SSE2 with ImmT == Imm8 and XS prefix.
2705 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
2708 // SSE2 with ImmT == Imm8 and XD prefix.
2709 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
2713 let Predicates = [HasSSE2] in {
2714 let AddedComplexity = 5 in
2715 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
2717 // SSE2 with ImmT == Imm8 and XS prefix.
2718 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
2720 // SSE2 with ImmT == Imm8 and XD prefix.
2721 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
2724 //===---------------------------------------------------------------------===//
2725 // SSE2 - Packed Integer Unpack Instructions
2726 //===---------------------------------------------------------------------===//
2728 let ExeDomain = SSEPackedInt in {
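/// sse2_unpack - PUNPCK* interleave instructions: 'rr' interleaves two
/// registers, 'rm' folds a 128-bit load (bitcast via bc_frag) as the second
/// operand. Is2Addr selects the tied-operand SSE vs. three-operand AVX syntax.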
2729 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
2730 PatFrag unp_frag, PatFrag bc_frag, bit Is2Addr = 1> {
2731 def rr : PDI<opc, MRMSrcReg,
2732 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2734 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2735 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2736 [(set VR128:$dst, (vt (unp_frag VR128:$src1, VR128:$src2)))]>;
2737 def rm : PDI<opc, MRMSrcMem,
2738 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2740 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2741 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2742 [(set VR128:$dst, (unp_frag VR128:$src1,
2743 (bc_frag (memopv2i64
2747 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2748 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, unpckl, bc_v16i8,
2750 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, unpckl, bc_v8i16,
2752 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, unpckl, bc_v4i32,
2755 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2756 /// knew to collapse (bitconvert VT to VT) into its operand.
2757 def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2758 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2759 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2761 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>, VEX_4V;
2762 def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2763 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2764 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2766 (v2i64 (unpckl VR128:$src1,
2767 (memopv2i64 addr:$src2))))]>, VEX_4V;
2769 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, unpckh, bc_v16i8,
2771 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, unpckh, bc_v8i16,
2773 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, unpckh, bc_v4i32,
2776 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2777 /// knew to collapse (bitconvert VT to VT) into its operand.
2778 def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2779 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2780 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2782 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>, VEX_4V;
2783 def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2784 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2785 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2787 (v2i64 (unpckh VR128:$src1,
2788 (memopv2i64 addr:$src2))))]>, VEX_4V;
2791 let Constraints = "$src1 = $dst" in {
2792 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, unpckl, bc_v16i8>;
2793 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, unpckl, bc_v8i16>;
2794 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, unpckl, bc_v4i32>;
2796 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2797 /// knew to collapse (bitconvert VT to VT) into its operand.
2798 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2799 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2800 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2802 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>;
2803 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2804 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2805 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2807 (v2i64 (unpckl VR128:$src1,
2808 (memopv2i64 addr:$src2))))]>;
2810 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, unpckh, bc_v16i8>;
2811 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, unpckh, bc_v8i16>;
2812 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, unpckh, bc_v4i32>;
2814 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2815 /// knew to collapse (bitconvert VT to VT) into its operand.
2816 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2817 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2818 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2820 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>;
2821 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2822 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2823 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2825 (v2i64 (unpckh VR128:$src1,
2826 (memopv2i64 addr:$src2))))]>;
2829 } // ExeDomain = SSEPackedInt
2831 //===---------------------------------------------------------------------===//
2832 // SSE2 - Packed Integer Extract and Insert
2833 //===---------------------------------------------------------------------===//
2835 let ExeDomain = SSEPackedInt in {
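/// sse2_pinsrw - Insert a 16-bit value into element $src3 of an XMM register:
/// 'rri' takes the low word of a GR32, 'rmi' inserts a word loaded from memory
/// (extloadi16).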
2836 multiclass sse2_pinsrw<bit Is2Addr = 1> {
2837 def rri : Ii8<0xC4, MRMSrcReg,
2838 (outs VR128:$dst), (ins VR128:$src1,
2839 GR32:$src2, i32i8imm:$src3),
2841 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2842 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2844 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
2845 def rmi : Ii8<0xC4, MRMSrcMem,
2846 (outs VR128:$dst), (ins VR128:$src1,
2847 i16mem:$src2, i32i8imm:$src3),
2849 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2850 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2852 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2857 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in
2858 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
2859 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2860 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2861 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2862 imm:$src2))]>, OpSize, VEX;
2863 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2864 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2865 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2866 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2870 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in
2871 defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
2873 let Constraints = "$src1 = $dst" in
2874 defm PINSRW : sse2_pinsrw, TB, OpSize;
2876 } // ExeDomain = SSEPackedInt
2878 //===---------------------------------------------------------------------===//
2879 // SSE2 - Packed Mask Creation
2880 //===---------------------------------------------------------------------===//
2882 let ExeDomain = SSEPackedInt in {
2884 let isAsmParserOnly = 1 in
2885 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2886 "pmovmskb\t{$src, $dst|$dst, $src}",
2887 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
2888 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2889 "pmovmskb\t{$src, $dst|$dst, $src}",
2890 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2892 } // ExeDomain = SSEPackedInt
2894 //===---------------------------------------------------------------------===//
2895 // SSE2 - Conditional Store
2896 //===---------------------------------------------------------------------===//
2898 let ExeDomain = SSEPackedInt in {
2900 let isAsmParserOnly = 1 in {
2902 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
2903 (ins VR128:$src, VR128:$mask),
2904 "maskmovdqu\t{$mask, $src|$src, $mask}",
2905 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
2907 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
2908 (ins VR128:$src, VR128:$mask),
2909 "maskmovdqu\t{$mask, $src|$src, $mask}",
2910 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
2914 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2915 "maskmovdqu\t{$mask, $src|$src, $mask}",
2916 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
2918 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2919 "maskmovdqu\t{$mask, $src|$src, $mask}",
2920 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
2922 } // ExeDomain = SSEPackedInt
2924 //===---------------------------------------------------------------------===//
2925 // SSE2 - Move Doubleword
2926 //===---------------------------------------------------------------------===//
2928 // Move Int Doubleword to Packed Double Int
2929 let isAsmParserOnly = 1 in {
2930 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2931 "movd\t{$src, $dst|$dst, $src}",
2933 (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
2934 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2935 "movd\t{$src, $dst|$dst, $src}",
2937 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
2940 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2941 "movd\t{$src, $dst|$dst, $src}",
2943 (v4i32 (scalar_to_vector GR32:$src)))]>;
2944 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2945 "movd\t{$src, $dst|$dst, $src}",
2947 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2950 // Move Int Doubleword to Single Scalar
2951 let isAsmParserOnly = 1 in {
2952 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2953 "movd\t{$src, $dst|$dst, $src}",
2954 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
2956 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2957 "movd\t{$src, $dst|$dst, $src}",
2958 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
2961 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2962 "movd\t{$src, $dst|$dst, $src}",
2963 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2965 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2966 "movd\t{$src, $dst|$dst, $src}",
2967 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2969 // Move Packed Doubleword Int to Packed Double Int
2970 let isAsmParserOnly = 1 in {
2971 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2972 "movd\t{$src, $dst|$dst, $src}",
2973 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2975 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
2976 (ins i32mem:$dst, VR128:$src),
2977 "movd\t{$src, $dst|$dst, $src}",
2978 [(store (i32 (vector_extract (v4i32 VR128:$src),
2979 (iPTR 0))), addr:$dst)]>, VEX;
2981 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2982 "movd\t{$src, $dst|$dst, $src}",
2983 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2985 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
2986 "movd\t{$src, $dst|$dst, $src}",
2987 [(store (i32 (vector_extract (v4i32 VR128:$src),
2988 (iPTR 0))), addr:$dst)]>;
2990 // Move Scalar Single to Double Int
2991 let isAsmParserOnly = 1 in {
2992 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
2993 "movd\t{$src, $dst|$dst, $src}",
2994 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
2995 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
2996 "movd\t{$src, $dst|$dst, $src}",
2997 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
2999 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3000 "movd\t{$src, $dst|$dst, $src}",
3001 [(set GR32:$dst, (bitconvert FR32:$src))]>;
3002 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3003 "movd\t{$src, $dst|$dst, $src}",
3004 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
3006 // movd / movq to XMM register zero-extends
3007 let AddedComplexity = 15, isAsmParserOnly = 1 in {
3008 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3009 "movd\t{$src, $dst|$dst, $src}",
3010 [(set VR128:$dst, (v4i32 (X86vzmovl
3011 (v4i32 (scalar_to_vector GR32:$src)))))]>,
3013 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3014 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3015 [(set VR128:$dst, (v2i64 (X86vzmovl
3016 (v2i64 (scalar_to_vector GR64:$src)))))]>,
3019 let AddedComplexity = 15 in {
3020 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3021 "movd\t{$src, $dst|$dst, $src}",
3022 [(set VR128:$dst, (v4i32 (X86vzmovl
3023 (v4i32 (scalar_to_vector GR32:$src)))))]>;
3024 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3025 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3026 [(set VR128:$dst, (v2i64 (X86vzmovl
3027 (v2i64 (scalar_to_vector GR64:$src)))))]>;
3030 let AddedComplexity = 20 in {
3031 let isAsmParserOnly = 1 in
3032 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3033 "movd\t{$src, $dst|$dst, $src}",
3035 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3036 (loadi32 addr:$src))))))]>,
3038 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3039 "movd\t{$src, $dst|$dst, $src}",
3041 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3042 (loadi32 addr:$src))))))]>;
3044 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
3045 (MOVZDI2PDIrm addr:$src)>;
3046 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3047 (MOVZDI2PDIrm addr:$src)>;
3048 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3049 (MOVZDI2PDIrm addr:$src)>;
3052 //===---------------------------------------------------------------------===//
3053 // SSE2 - Move Quadword
3054 //===---------------------------------------------------------------------===//
3056 // Move Quadword Int to Packed Quadword Int
3057 let isAsmParserOnly = 1 in
3058 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3059 "vmovq\t{$src, $dst|$dst, $src}",
3061 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3062 VEX, Requires<[HasAVX, HasSSE2]>;
3063 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3064 "movq\t{$src, $dst|$dst, $src}",
3066 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3067 Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
3069 // Move Packed Quadword Int to Quadword Int
3070 let isAsmParserOnly = 1 in
3071 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3072 "movq\t{$src, $dst|$dst, $src}",
3073 [(store (i64 (vector_extract (v2i64 VR128:$src),
3074 (iPTR 0))), addr:$dst)]>, VEX;
3075 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3076 "movq\t{$src, $dst|$dst, $src}",
3077 [(store (i64 (vector_extract (v2i64 VR128:$src),
3078 (iPTR 0))), addr:$dst)]>;
3080 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
3081 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
3083 // Store / copy the lower 64 bits of an XMM register.
3084 let isAsmParserOnly = 1 in
3085 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3086 "movq\t{$src, $dst|$dst, $src}",
3087 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
3088 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3089 "movq\t{$src, $dst|$dst, $src}",
3090 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
3092 let AddedComplexity = 20, isAsmParserOnly = 1 in
3093 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3094 "vmovq\t{$src, $dst|$dst, $src}",
3096 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3097 (loadi64 addr:$src))))))]>,
3098 XS, VEX, Requires<[HasAVX, HasSSE2]>;
3100 let AddedComplexity = 20 in {
3101 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3102 "movq\t{$src, $dst|$dst, $src}",
3104 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3105 (loadi64 addr:$src))))))]>,
3106 XS, Requires<[HasSSE2]>;
3108 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3109 (MOVZQI2PQIrm addr:$src)>;
3110 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
3111 (MOVZQI2PQIrm addr:$src)>;
3112 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
3115 // Move from XMM to XMM, clearing the upper 64 bits. Note: there is a bug in the
3116 // IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
3117 let isAsmParserOnly = 1, AddedComplexity = 15 in
3118 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3119 "vmovq\t{$src, $dst|$dst, $src}",
3120 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3121 XS, VEX, Requires<[HasAVX, HasSSE2]>;
3122 let AddedComplexity = 15 in
3123 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3124 "movq\t{$src, $dst|$dst, $src}",
3125 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3126 XS, Requires<[HasSSE2]>;
3128 let AddedComplexity = 20, isAsmParserOnly = 1 in
3129 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3130 "vmovq\t{$src, $dst|$dst, $src}",
3131 [(set VR128:$dst, (v2i64 (X86vzmovl
3132 (loadv2i64 addr:$src))))]>,
3133 XS, VEX, Requires<[HasAVX, HasSSE2]>;
3134 let AddedComplexity = 20 in {
3135 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3136 "movq\t{$src, $dst|$dst, $src}",
3137 [(set VR128:$dst, (v2i64 (X86vzmovl
3138 (loadv2i64 addr:$src))))]>,
3139 XS, Requires<[HasSSE2]>;
3141 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
3142 (MOVZPQILo2PQIrm addr:$src)>;
3145 // Instructions to match in the assembler
3146 let isAsmParserOnly = 1 in {
3147 // These instructions are in fact aliases of movd with 64-bit operands.
3148 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3149 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3150 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3151 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3154 // Instructions for the disassembler
3155 // xr = XMM register
3158 let isAsmParserOnly = 1 in
3159 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3160 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3161 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3162 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3164 //===---------------------------------------------------------------------===//
3165 // SSE2 - Misc Instructions
3166 //===---------------------------------------------------------------------===//
3169 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3170 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3171 TB, Requires<[HasSSE2]>;
3173 // Load, store, and memory fence
3174 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3175 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3176 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3177 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3179 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3180 // was introduced with SSE2, it's backward compatible.
3181 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
3183 //TODO: custom lower this so as to never even generate the noop
3184 def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm),
3185 (i8 0)), (NOOP)>;
3186 def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
3187 def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
3188 def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm),
3189 (i8 1)), (MFENCE)>;
3191 // Alias instruction that materializes an all-ones vector (via pcmpeqd) for SSE.
3192 // We set canFoldAsLoad because this can be converted to a constant-pool
3193 // load of an all-ones value if folding it would be beneficial.
3194 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3195 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3196 // FIXME: Change encoding to pseudo.
3197 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3198 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
3200 //===---------------------------------------------------------------------===//
3201 // SSE3 - Conversion Instructions
3202 //===---------------------------------------------------------------------===//
3204 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in {
3205 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3206 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3207 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3208 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3209 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3210 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3213 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3214 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3215 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3216 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3217 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3218 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3219 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3220 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3222 //===---------------------------------------------------------------------===//
3223 // SSE3 - Move Instructions
3224 //===---------------------------------------------------------------------===//
3226 // Replicate Single FP
3227 multiclass sse3_replicate_sfp<bits<8> op, PatFrag rep_frag, string OpcodeStr> {
3228 def rr : S3SI<op, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3229 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3230 [(set VR128:$dst, (v4f32 (rep_frag
3231 VR128:$src, (undef))))]>;
3232 def rm : S3SI<op, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3233 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3234 [(set VR128:$dst, (rep_frag
3235 (memopv4f32 addr:$src), (undef)))]>;
3238 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in {
3239 defm VMOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "vmovshdup">, VEX;
3240 defm VMOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "vmovsldup">, VEX;
3242 defm MOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "movshdup">;
3243 defm MOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "movsldup">;
3245 // Replicate Double FP
3246 multiclass sse3_replicate_dfp<string OpcodeStr> {
3247 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3248 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3249 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
3250 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
3251 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3253 (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
3257 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in
3258 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
3259 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
3261 // Move Unaligned Integer
3262 let isAsmParserOnly = 1 in
3263 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3264 "vlddqu\t{$src, $dst|$dst, $src}",
3265 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
3266 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3267 "lddqu\t{$src, $dst|$dst, $src}",
3268 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
3270 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
3272 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3274 // Several Move patterns
3275 let AddedComplexity = 5 in {
3276 def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
3277 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3278 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3279 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3280 def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
3281 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3282 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
3283 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3286 // vector_shuffle v1, <undef> <1, 1, 3, 3>
3287 let AddedComplexity = 15 in
3288 def : Pat<(v4i32 (movshdup VR128:$src, (undef))),
3289 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3290 let AddedComplexity = 20 in
3291 def : Pat<(v4i32 (movshdup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3292 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
3294 // vector_shuffle v1, <undef> <0, 0, 2, 2>
3295 let AddedComplexity = 15 in
3296 def : Pat<(v4i32 (movsldup VR128:$src, (undef))),
3297 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3298 let AddedComplexity = 20 in
3299 def : Pat<(v4i32 (movsldup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3300 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
3302 //===---------------------------------------------------------------------===//
3303 // SSE3 - Arithmetic
3304 //===---------------------------------------------------------------------===//
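/// sse3_addsub - ADDSUBPS / ADDSUBPD: subtract in the even elements and add in
/// the odd elements of the two sources. Is2Addr selects the tied-operand SSE
/// form vs. the three-operand AVX form.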
3306 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, bit Is2Addr = 1> {
3307 def rr : I<0xD0, MRMSrcReg,
3308 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3310 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3311 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3312 [(set VR128:$dst, (Int VR128:$src1,
3314 def rm : I<0xD0, MRMSrcMem,
3315 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
3317 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3318 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3319 [(set VR128:$dst, (Int VR128:$src1,
3320 (memop addr:$src2)))]>;
3324 let isAsmParserOnly = 1, Predicates = [HasSSE3, HasAVX],
3325 ExeDomain = SSEPackedDouble in {
3326 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", 0>, XD,
3328 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", 0>, OpSize,
3331 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
3332 ExeDomain = SSEPackedDouble in {
3333 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps">, XD;
3334 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd">, TB, OpSize;
3337 //===---------------------------------------------------------------------===//
3338 // SSE3 Instructions
3339 //===---------------------------------------------------------------------===//
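// Horizontal add / subtract helper classes: S3D_Int* wrap the single-precision
// (v4f32) intrinsics and S3_Int* the double-precision (v2f64) ones; *rr takes
// two registers, *rm folds a 128-bit memory operand.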
3342 class S3D_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1>
3343 : S3DI<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3345 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3346 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3347 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
3348 class S3D_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1>
3349 : S3DI<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
3351 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3352 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3353 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, (memop addr:$src2))))]>;
3354 class S3_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1>
3355 : S3I<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3357 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3358 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3359 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
3360 class S3_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1>
3361 : S3I<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
3363 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3364 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3365 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, (memopv2f64 addr:$src2))))]>;
3367 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in {
3368 def VHADDPSrr : S3D_Intrr<0x7C, "vhaddps", int_x86_sse3_hadd_ps, 0>, VEX_4V;
3369 def VHADDPSrm : S3D_Intrm<0x7C, "vhaddps", int_x86_sse3_hadd_ps, 0>, VEX_4V;
3370 def VHADDPDrr : S3_Intrr <0x7C, "vhaddpd", int_x86_sse3_hadd_pd, 0>, VEX_4V;
3371 def VHADDPDrm : S3_Intrm <0x7C, "vhaddpd", int_x86_sse3_hadd_pd, 0>, VEX_4V;
3372 def VHSUBPSrr : S3D_Intrr<0x7D, "vhsubps", int_x86_sse3_hsub_ps, 0>, VEX_4V;
3373 def VHSUBPSrm : S3D_Intrm<0x7D, "vhsubps", int_x86_sse3_hsub_ps, 0>, VEX_4V;
3374 def VHSUBPDrr : S3_Intrr <0x7D, "vhsubpd", int_x86_sse3_hsub_pd, 0>, VEX_4V;
3375 def VHSUBPDrm : S3_Intrm <0x7D, "vhsubpd", int_x86_sse3_hsub_pd, 0>, VEX_4V;
3378 let Constraints = "$src1 = $dst" in {
3379 def HADDPSrr : S3D_Intrr<0x7C, "haddps", int_x86_sse3_hadd_ps>;
3380 def HADDPSrm : S3D_Intrm<0x7C, "haddps", int_x86_sse3_hadd_ps>;
3381 def HADDPDrr : S3_Intrr <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
3382 def HADDPDrm : S3_Intrm <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
3383 def HSUBPSrr : S3D_Intrr<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
3384 def HSUBPSrm : S3D_Intrm<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
3385 def HSUBPDrr : S3_Intrr <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
3386 def HSUBPDrm : S3_Intrm <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
3389 //===---------------------------------------------------------------------===//
3390 // SSSE3 - Packed Absolute Instructions
3391 //===---------------------------------------------------------------------===//
3393 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
3394 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
3395 PatFrag mem_frag64, PatFrag mem_frag128,
3396 Intrinsic IntId64, Intrinsic IntId128> {
3397 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
3398 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3399 [(set VR64:$dst, (IntId64 VR64:$src))]>;
3401 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
3402 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3404 (IntId64 (bitconvert (mem_frag64 addr:$src))))]>;
3406 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3408 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3409 [(set VR128:$dst, (IntId128 VR128:$src))]>,
3412 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3414 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3417 (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
3420 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in {
3421 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv8i8, memopv16i8,
3422 int_x86_ssse3_pabs_b,
3423 int_x86_ssse3_pabs_b_128>, VEX;
3424 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv4i16, memopv8i16,
3425 int_x86_ssse3_pabs_w,
3426 int_x86_ssse3_pabs_w_128>, VEX;
3427 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv2i32, memopv4i32,
3428 int_x86_ssse3_pabs_d,
3429 int_x86_ssse3_pabs_d_128>, VEX;
3432 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv8i8, memopv16i8,
3433 int_x86_ssse3_pabs_b,
3434 int_x86_ssse3_pabs_b_128>;
3435 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv4i16, memopv8i16,
3436 int_x86_ssse3_pabs_w,
3437 int_x86_ssse3_pabs_w_128>;
3438 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv2i32, memopv4i32,
3439 int_x86_ssse3_pabs_d,
3440 int_x86_ssse3_pabs_d_128>;
3442 //===---------------------------------------------------------------------===//
3443 // SSSE3 - Packed Binary Operator Instructions
3444 //===---------------------------------------------------------------------===//
3446 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
3447 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
3448 PatFrag mem_frag64, PatFrag mem_frag128,
3449 Intrinsic IntId64, Intrinsic IntId128,
3451 let isCommutable = 1 in
3452 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
3453 (ins VR64:$src1, VR64:$src2),
3455 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3456 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3457 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]>;
3458 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
3459 (ins VR64:$src1, i64mem:$src2),
3461 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3462 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3464 (IntId64 VR64:$src1,
3465 (bitconvert (memopv8i8 addr:$src2))))]>;
3467 let isCommutable = 1 in
3468 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3469 (ins VR128:$src1, VR128:$src2),
3471 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3472 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3473 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3475 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3476 (ins VR128:$src1, i128mem:$src2),
3478 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3479 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3481 (IntId128 VR128:$src1,
3482 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
3485 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in {
3486 let isCommutable = 0 in {
3487 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv4i16, memopv8i16,
3488 int_x86_ssse3_phadd_w,
3489 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
3490 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv2i32, memopv4i32,
3491 int_x86_ssse3_phadd_d,
3492 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
3493 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv4i16, memopv8i16,
3494 int_x86_ssse3_phadd_sw,
3495 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
3496 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv4i16, memopv8i16,
3497 int_x86_ssse3_phsub_w,
3498 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
3499 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv2i32, memopv4i32,
3500 int_x86_ssse3_phsub_d,
3501 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
3502 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv4i16, memopv8i16,
3503 int_x86_ssse3_phsub_sw,
3504 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
3505 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv8i8, memopv16i8,
3506 int_x86_ssse3_pmadd_ub_sw,
3507 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
3508 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv8i8, memopv16i8,
3509 int_x86_ssse3_pshuf_b,
3510 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
3511 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv8i8, memopv16i8,
3512 int_x86_ssse3_psign_b,
3513 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
3514 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv4i16, memopv8i16,
3515 int_x86_ssse3_psign_w,
3516 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
3517 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv2i32, memopv4i32,
3518 int_x86_ssse3_psign_d,
3519 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
3521 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv4i16, memopv8i16,
3522 int_x86_ssse3_pmul_hr_sw,
3523 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
3526 // None of these have i8 immediate fields.
3527 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
3528 let isCommutable = 0 in {
3529 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv4i16, memopv8i16,
3530 int_x86_ssse3_phadd_w,
3531 int_x86_ssse3_phadd_w_128>;
3532 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv2i32, memopv4i32,
3533 int_x86_ssse3_phadd_d,
3534 int_x86_ssse3_phadd_d_128>;
3535 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv4i16, memopv8i16,
3536 int_x86_ssse3_phadd_sw,
3537 int_x86_ssse3_phadd_sw_128>;
3538 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv4i16, memopv8i16,
3539 int_x86_ssse3_phsub_w,
3540 int_x86_ssse3_phsub_w_128>;
3541 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv2i32, memopv4i32,
3542 int_x86_ssse3_phsub_d,
3543 int_x86_ssse3_phsub_d_128>;
3544 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv4i16, memopv8i16,
3545 int_x86_ssse3_phsub_sw,
3546 int_x86_ssse3_phsub_sw_128>;
3547 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv8i8, memopv16i8,
3548 int_x86_ssse3_pmadd_ub_sw,
3549 int_x86_ssse3_pmadd_ub_sw_128>;
3550 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv8i8, memopv16i8,
3551 int_x86_ssse3_pshuf_b,
3552 int_x86_ssse3_pshuf_b_128>;
3553 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv8i8, memopv16i8,
3554 int_x86_ssse3_psign_b,
3555 int_x86_ssse3_psign_b_128>;
3556 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv4i16, memopv8i16,
3557 int_x86_ssse3_psign_w,
3558 int_x86_ssse3_psign_w_128>;
3559 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv2i32, memopv4i32,
3560 int_x86_ssse3_psign_d,
3561 int_x86_ssse3_psign_d_128>;
3563 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv4i16, memopv8i16,
3564 int_x86_ssse3_pmul_hr_sw,
3565 int_x86_ssse3_pmul_hr_sw_128>;
3568 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
3569 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
3570 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
3571 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
3573 //===---------------------------------------------------------------------===//
3574 // SSSE3 - Packed Align Instruction Patterns
3575 //===---------------------------------------------------------------------===//
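/// sse3_palign - PALIGNR: concatenate the two sources and extract a result
/// shifted right by $src3 bytes. R64* are the MMX (VR64) forms, R128* the XMM
/// (VR128) forms; Is2Addr selects SSE vs. AVX assembly syntax.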
3577 multiclass sse3_palign<string asm, bit Is2Addr = 1> {
3578 def R64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
3579 (ins VR64:$src1, VR64:$src2, i8imm:$src3),
3581 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3583 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3585 def R64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
3586 (ins VR64:$src1, i64mem:$src2, i8imm:$src3),
3588 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3590 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3593 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
3594 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
3596 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3598 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3600 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
3601 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
3603 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3605 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3609 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in
3610 defm VPALIGN : sse3_palign<"vpalignr", 0>, VEX_4V;
3611 let Constraints = "$src1 = $dst" in
3612 defm PALIGN : sse3_palign<"palignr">;
3614 let AddedComplexity = 5 in {
3616 def : Pat<(v1i64 (palign:$src3 VR64:$src1, VR64:$src2)),
3617 (PALIGNR64rr VR64:$src2, VR64:$src1,
3618 (SHUFFLE_get_palign_imm VR64:$src3))>,
3619 Requires<[HasSSSE3]>;
3620 def : Pat<(v2i32 (palign:$src3 VR64:$src1, VR64:$src2)),
3621 (PALIGNR64rr VR64:$src2, VR64:$src1,
3622 (SHUFFLE_get_palign_imm VR64:$src3))>,
3623 Requires<[HasSSSE3]>;
3624 def : Pat<(v4i16 (palign:$src3 VR64:$src1, VR64:$src2)),
3625 (PALIGNR64rr VR64:$src2, VR64:$src1,
3626 (SHUFFLE_get_palign_imm VR64:$src3))>,
3627 Requires<[HasSSSE3]>;
3628 def : Pat<(v8i8 (palign:$src3 VR64:$src1, VR64:$src2)),
3629 (PALIGNR64rr VR64:$src2, VR64:$src1,
3630 (SHUFFLE_get_palign_imm VR64:$src3))>,
3631 Requires<[HasSSSE3]>;
3633 def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
3634 (PALIGNR128rr VR128:$src2, VR128:$src1,
3635 (SHUFFLE_get_palign_imm VR128:$src3))>,
3636 Requires<[HasSSSE3]>;
3637 def : Pat<(v4f32 (palign:$src3 VR128:$src1, VR128:$src2)),
3638 (PALIGNR128rr VR128:$src2, VR128:$src1,
3639 (SHUFFLE_get_palign_imm VR128:$src3))>,
3640 Requires<[HasSSSE3]>;
3641 def : Pat<(v8i16 (palign:$src3 VR128:$src1, VR128:$src2)),
3642 (PALIGNR128rr VR128:$src2, VR128:$src1,
3643 (SHUFFLE_get_palign_imm VR128:$src3))>,
3644 Requires<[HasSSSE3]>;
3645 def : Pat<(v16i8 (palign:$src3 VR128:$src1, VR128:$src2)),
3646 (PALIGNR128rr VR128:$src2, VR128:$src1,
3647 (SHUFFLE_get_palign_imm VR128:$src3))>,
3648 Requires<[HasSSSE3]>;
3651 //===---------------------------------------------------------------------===//
3652 // SSSE3 Misc Instructions
3653 //===---------------------------------------------------------------------===//
3655 // Thread synchronization
3656 def MONITOR : I<0x01, MRM_C8, (outs), (ins), "monitor",
3657 [(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
3658 def MWAIT : I<0x01, MRM_C9, (outs), (ins), "mwait",
3659 [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
3661 //===---------------------------------------------------------------------===//
3662 // Non-Instruction Patterns
3663 //===---------------------------------------------------------------------===//
3665 // extload f32 -> f64. This matches load+fextend because we have a hack in
3666 // the isel (PreprocessForFPConvert) that can introduce loads after dag
3667 // combining.
3668 // Since these loads aren't folded into the fextend, we have to match it
3669 // explicitly here.
3670 let Predicates = [HasSSE2] in
3671 def : Pat<(fextend (loadf32 addr:$src)),
3672 (CVTSS2SDrm addr:$src)>;
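// Bitcasts between the 128-bit vector types require no instructions: map every
// such (bitconvert ...) to its unchanged source register.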
3675 let Predicates = [HasSSE2] in {
3676 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
3677 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
3678 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
3679 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
3680 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
3681 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
3682 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
3683 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
3684 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
3685 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
3686 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
3687 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
3688 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
3689 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
3690 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
3691 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
3692 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
3693 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
3694 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
3695 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
3696 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
3697 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
3698 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
3699 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
3700 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
3701 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
3702 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
3703 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
3704 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
3705 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
3708 // Move scalar to XMM zero-extended
3709 // movd to XMM register zero-extends
3710 let AddedComplexity = 15 in {
3711 // Zero a VR128, then do a MOVS{S|D} to the lower bits.
3712 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
3713 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
3714 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
3715 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
3716 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
3717 (MOVSSrr (v4f32 (V_SET0PS)),
3718 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
3719 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
3720 (MOVSSrr (v4i32 (V_SET0PI)),
3721 (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
3724 // Splat v2f64 / v2i64
3725 let AddedComplexity = 10 in {
3726 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
3727 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3728 def : Pat<(unpckh (v2f64 VR128:$src), (undef)),
3729 (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3730 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
3731 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3732 def : Pat<(unpckh (v2i64 VR128:$src), (undef)),
3733 (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3736 // Special unary SHUFPSrri case.
3737 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
3738 (SHUFPSrri VR128:$src1, VR128:$src1,
3739 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3740 let AddedComplexity = 5 in
3741 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3742 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3743 Requires<[HasSSE2]>;
3744 // Special unary SHUFPDrri case.
3745 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
3746 (SHUFPDrri VR128:$src1, VR128:$src1,
3747 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3748 Requires<[HasSSE2]>;
3749 // Special unary SHUFPDrri case.
3750 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
3751 (SHUFPDrri VR128:$src1, VR128:$src1,
3752 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3753 Requires<[HasSSE2]>;
3754 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3755 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3756 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3757 Requires<[HasSSE2]>;
3759 // Special binary v4i32 shuffle cases with SHUFPS.
3760 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
3761 (SHUFPSrri VR128:$src1, VR128:$src2,
3762 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3763 Requires<[HasSSE2]>;
3764 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)))),
3765 (SHUFPSrmi VR128:$src1, addr:$src2,
3766 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3767 Requires<[HasSSE2]>;
3768 // Special binary v2i64 shuffle cases using SHUFPDrri.
3769 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
3770 (SHUFPDrri VR128:$src1, VR128:$src2,
3771 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3772 Requires<[HasSSE2]>;
3774 // vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
3775 let AddedComplexity = 15 in {
3776 def : Pat<(v4i32 (unpckl_undef:$src2 VR128:$src, (undef))),
3777 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3778 Requires<[OptForSpeed, HasSSE2]>;
3779 def : Pat<(v4f32 (unpckl_undef:$src2 VR128:$src, (undef))),
3780 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3781 Requires<[OptForSpeed, HasSSE2]>;
3783 let AddedComplexity = 10 in {
3784 def : Pat<(v4f32 (unpckl_undef VR128:$src, (undef))),
3785 (UNPCKLPSrr VR128:$src, VR128:$src)>;
3786 def : Pat<(v16i8 (unpckl_undef VR128:$src, (undef))),
3787 (PUNPCKLBWrr VR128:$src, VR128:$src)>;
3788 def : Pat<(v8i16 (unpckl_undef VR128:$src, (undef))),
3789 (PUNPCKLWDrr VR128:$src, VR128:$src)>;
3790 def : Pat<(v4i32 (unpckl_undef VR128:$src, (undef))),
3791 (PUNPCKLDQrr VR128:$src, VR128:$src)>;
3794 // vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
3795 let AddedComplexity = 15 in {
3796 def : Pat<(v4i32 (unpckh_undef:$src2 VR128:$src, (undef))),
3797 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3798 Requires<[OptForSpeed, HasSSE2]>;
3799 def : Pat<(v4f32 (unpckh_undef:$src2 VR128:$src, (undef))),
3800 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3801 Requires<[OptForSpeed, HasSSE2]>;
3803 let AddedComplexity = 10 in {
3804 def : Pat<(v4f32 (unpckh_undef VR128:$src, (undef))),
3805 (UNPCKHPSrr VR128:$src, VR128:$src)>;
3806 def : Pat<(v16i8 (unpckh_undef VR128:$src, (undef))),
3807 (PUNPCKHBWrr VR128:$src, VR128:$src)>;
3808 def : Pat<(v8i16 (unpckh_undef VR128:$src, (undef))),
3809 (PUNPCKHWDrr VR128:$src, VR128:$src)>;
3810 def : Pat<(v4i32 (unpckh_undef VR128:$src, (undef))),
3811 (PUNPCKHDQrr VR128:$src, VR128:$src)>;
3814 let AddedComplexity = 20 in {
3815 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
3816 def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
3817 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
3819 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
3820 def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
3821 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
3823 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
3824 def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
3825 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3826 def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
3827 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3830 let AddedComplexity = 20 in {
3831 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
3832 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
3833 (MOVLPSrm VR128:$src1, addr:$src2)>;
3834 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
3835 (MOVLPDrm VR128:$src1, addr:$src2)>;
3836 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
3837 (MOVLPSrm VR128:$src1, addr:$src2)>;
3838 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
3839 (MOVLPDrm VR128:$src1, addr:$src2)>;
3842 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
3843 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3844 (MOVLPSmr addr:$src1, VR128:$src2)>;
3845 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3846 (MOVLPDmr addr:$src1, VR128:$src2)>;
3847 def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
3849 (MOVLPSmr addr:$src1, VR128:$src2)>;
3850 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3851 (MOVLPDmr addr:$src1, VR128:$src2)>;
3853 let AddedComplexity = 15 in {
3854 // Setting the lowest element in the vector.
3855 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
3856 (MOVSSrr (v4i32 VR128:$src1),
3857 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
3858 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
3859 (MOVSDrr (v2i64 VR128:$src1),
3860 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
3862 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
3863 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
3864 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3865 Requires<[HasSSE2]>;
3866 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
3867 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3868 Requires<[HasSSE2]>;
3871 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
3872 // fall back to this for SSE1)
3873 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
3874 (SHUFPSrri VR128:$src2, VR128:$src1,
3875 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3877 // Set lowest element and zero upper elements.
3878 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
3879 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
3881 // Some special case pandn patterns.
3882 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3884 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3885 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3887 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3888 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3890 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3892 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3893 (memop addr:$src2))),
3894 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3895 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3896 (memop addr:$src2))),
3897 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3898 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3899 (memop addr:$src2))),
3900 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3902 // vector -> vector casts
3903 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
3904 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
3905 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
3906 (Int_CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
3907 def : Pat<(v2f64 (sint_to_fp (v2i32 VR64:$src))),
3908 (Int_CVTPI2PDrr VR64:$src)>, Requires<[HasSSE2]>;
3909 def : Pat<(v2i32 (fp_to_sint (v2f64 VR128:$src))),
3910 (Int_CVTTPD2PIrr VR128:$src)>, Requires<[HasSSE2]>;
3912 // Use movaps / movups for SSE integer load / store (one byte shorter).
3913 def : Pat<(alignedloadv4i32 addr:$src),
3914 (MOVAPSrm addr:$src)>;
3915 def : Pat<(loadv4i32 addr:$src),
3916 (MOVUPSrm addr:$src)>;
3917 def : Pat<(alignedloadv2i64 addr:$src),
3918 (MOVAPSrm addr:$src)>;
3919 def : Pat<(loadv2i64 addr:$src),
3920 (MOVUPSrm addr:$src)>;
3922 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3923 (MOVAPSmr addr:$dst, VR128:$src)>;
3924 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3925 (MOVAPSmr addr:$dst, VR128:$src)>;
3926 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3927 (MOVAPSmr addr:$dst, VR128:$src)>;
3928 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3929 (MOVAPSmr addr:$dst, VR128:$src)>;
3930 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3931 (MOVUPSmr addr:$dst, VR128:$src)>;
3932 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3933 (MOVUPSmr addr:$dst, VR128:$src)>;
3934 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3935 (MOVUPSmr addr:$dst, VR128:$src)>;
3936 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3937 (MOVUPSmr addr:$dst, VR128:$src)>;
3939 //===----------------------------------------------------------------------===//
3940 // SSE4.1 - Packed Move with Sign/Zero Extend
3941 //===----------------------------------------------------------------------===//
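/// SS41I_binop_rm_int8 - PMOVSX* / PMOVZX* forms whose memory operand is
/// 64 bits wide: the 'rm' pattern matches a scalar i64 load inserted into a
/// vector and bitcast before being extended.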
3943 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3944 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3945 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3946 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3948 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3949 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3950 [(set VR128:$dst,
3951 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
3952 OpSize;
3953 }
3955 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
3956 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
3958 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
3960 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
3962 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
3964 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
3966 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
3970 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
3971 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
3972 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
3973 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
3974 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
3975 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
3977 // Common patterns involving scalar load.
3978 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
3979 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
3980 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
3981 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
3983 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
3984 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
3985 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
3986 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
3988 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
3989 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
3990 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
3991 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
3993 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
3994 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
3995 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
3996 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
3998 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
3999 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4000 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
4001 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4003 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
4004 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4005 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
4006 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
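// The patterns above fold the zero-extending 64-bit scalar load (the
// vzmovl_v2i64 / vzload_v2i64 forms) directly into the memory operand of the
// pmovsx/pmovzx instruction, so no separate movq is emitted for the load.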
4009 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4010 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4011 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4012 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4014 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4015 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4016 [(set VR128:$dst,
4017 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
4018 OpSize;
4019 }
4021 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4022 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
4024 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
4026 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
4028 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
4032 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
4033 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
4034 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
4035 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
4037 // Common patterns involving scalar load
4038 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
4039 (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
4040 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
4041 (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
4043 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
4044 (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
4045 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
4046 (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
4049 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4050 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4051 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4052 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4054 // Expecting an i16 load any-extended to an i32 value.
4055 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
4056 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4057 [(set VR128:$dst, (IntId (bitconvert
4058 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
4059 OpSize;
4060 }
4062 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4063 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
4065 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
4068 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
4069 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
4071 // Common patterns involving scalar load
4072 def : Pat<(int_x86_sse41_pmovsxbq
4073 (bitconvert (v4i32 (X86vzmovl
4074 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4075 (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
4077 def : Pat<(int_x86_sse41_pmovzxbq
4078 (bitconvert (v4i32 (X86vzmovl
4079 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4080 (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
4082 //===----------------------------------------------------------------------===//
4083 // SSE4.1 - Extract Instructions
4084 //===----------------------------------------------------------------------===//
4086 /// SS41I_extract8 - SSE 4.1 extract 8 bits to a 32-bit reg or 8-bit mem
4087 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
4088 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4089 (ins VR128:$src1, i32i8imm:$src2),
4090 !strconcat(OpcodeStr,
4091 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4092 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
4094 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4095 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
4096 !strconcat(OpcodeStr,
4097 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4100 // There's an AssertZext in the way of writing the store pattern
4101 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4104 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4105 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
4107 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
4110 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
4111 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
4112 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4113 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
4114 !strconcat(OpcodeStr,
4115 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4118 // There's an AssertZext in the way of writing the store pattern
4119 // (store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))), addr:$dst)
4122 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4123 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
4125 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
4128 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4129 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
4130 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4131 (ins VR128:$src1, i32i8imm:$src2),
4132 !strconcat(OpcodeStr,
4133 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4134 [(set GR32:$dst,
4135 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
4136 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4137 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
4138 !strconcat(OpcodeStr,
4139 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4140 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
4141 addr:$dst)]>, OpSize;
4144 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4145 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
4147 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
4149 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
4150 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
4151 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
4152 (ins VR128:$src1, i32i8imm:$src2),
4153 !strconcat(OpcodeStr,
4154 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4155 [(set GR64:$dst,
4156 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
4157 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4158 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
4159 !strconcat(OpcodeStr,
4160 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4161 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
4162 addr:$dst)]>, OpSize, REX_W;
4165 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4166 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
4168 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
4170 /// SS41I_extractf32 - SSE 4.1 extract a 32-bit fp value to an int reg or
4171 /// memory destination
4172 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
4173 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4174 (ins VR128:$src1, i32i8imm:$src2),
4175 !strconcat(OpcodeStr,
4176 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4177 [(set GR32:$dst,
4178 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
4179 OpSize;
4180 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4181 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
4182 !strconcat(OpcodeStr,
4183 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4184 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
4185 addr:$dst)]>, OpSize;
4188 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4189 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
4190 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
4192 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
4193 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
4196 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
4197 Requires<[HasSSE41]>;
4199 //===----------------------------------------------------------------------===//
4200 // SSE4.1 - Insert Instructions
4201 //===----------------------------------------------------------------------===//
4203 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
4204 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4205 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4207 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4209 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4211 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
4212 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4213 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
4215 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4217 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4219 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
4220 imm:$src3))]>, OpSize;
4223 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4224 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
4225 let Constraints = "$src1 = $dst" in
4226 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
4228 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
4229 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4230 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4232 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4234 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4236 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
4238 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4239 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
4241 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4243 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4245 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
4246 imm:$src3)))]>, OpSize;
4249 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4250 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
4251 let Constraints = "$src1 = $dst" in
4252 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
4254 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
4255 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4256 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4258 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4260 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4262 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
4264 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4265 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
4267 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4269 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4271 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
4272 imm:$src3)))]>, OpSize;
4275 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4276 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
4277 let Constraints = "$src1 = $dst" in
4278 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
4280 // insertps has a few different modes. The first two below are optimized
4281 // inserts that won't zero arbitrary elements in the destination vector.
4282 // The next one matches the intrinsic and may zero arbitrary elements in the
4283 // target vector.
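// For reference, in the insertps immediate: when the source is a register,
// imm[7:6] selects the source element, imm[5:4] selects the destination
// element, and imm[3:0] is a zero mask for the result elements. For example,
// imm = 0x1D inserts source element 0 into destination element 1 and zeroes
// elements 0, 2 and 3.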
4284 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
4285 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4286 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4288 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4290 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4292 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
4294 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4295 (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
4297 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4299 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4301 (X86insrtps VR128:$src1,
4302 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
4303 imm:$src3))]>, OpSize;
4306 let Constraints = "$src1 = $dst" in
4307 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
4308 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4309 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
4311 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4312 (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>;
4314 //===----------------------------------------------------------------------===//
4315 // SSE4.1 - Round Instructions
4316 //===----------------------------------------------------------------------===//
4318 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd,
4321 Intrinsic V2F64Int> {
4323 // Vector intrinsic operation, reg
4324 def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
4325 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4326 !strconcat(OpcodeStr,
4327 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4328 [(set VR128:$dst, (V4F32Int VR128:$src1, imm:$src2))]>,
4331 // Vector intrinsic operation, mem
4332 def PSm_Int : Ii8<opcps, MRMSrcMem,
4333 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
4334 !strconcat(OpcodeStr,
4335 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4337 (V4F32Int (memopv4f32 addr:$src1),imm:$src2))]>,
4339 Requires<[HasSSE41]>;
4341 // Vector intrinsic operation, reg
4342 def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
4343 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4344 !strconcat(OpcodeStr,
4345 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4346 [(set VR128:$dst, (V2F64Int VR128:$src1, imm:$src2))]>,
4349 // Vector intrinsic operation, mem
4350 def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
4351 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
4352 !strconcat(OpcodeStr,
4353 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4355 (V2F64Int (memopv2f64 addr:$src1),imm:$src2))]>,
4359 multiclass sse41_fp_unop_rm_avx<bits<8> opcps, bits<8> opcpd,
4362 // Vector intrinsic operation, reg
4363 def PSr : SS4AIi8<opcps, MRMSrcReg,
4364 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4365 !strconcat(OpcodeStr,
4366 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4369 // Vector intrinsic operation, mem
4370 def PSm : Ii8<opcps, MRMSrcMem,
4371 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
4372 !strconcat(OpcodeStr,
4373 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4374 []>, TA, OpSize, Requires<[HasSSE41]>;
4376 // Vector intrinsic operation, reg
4377 def PDr : SS4AIi8<opcpd, MRMSrcReg,
4378 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4379 !strconcat(OpcodeStr,
4380 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4383 // Vector intrinsic operation, mem
4384 def PDm : SS4AIi8<opcpd, MRMSrcMem,
4385 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
4386 !strconcat(OpcodeStr,
4387 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4391 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4394 Intrinsic F64Int, bit Is2Addr = 1> {
4395 // Intrinsic operation, reg.
4396 def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
4397 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4399 !strconcat(OpcodeStr,
4400 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4401 !strconcat(OpcodeStr,
4402 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4403 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4406 // Intrinsic operation, mem.
4407 def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
4408 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4410 !strconcat(OpcodeStr,
4411 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4412 !strconcat(OpcodeStr,
4413 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4415 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
4418 // Intrinsic operation, reg.
4419 def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
4420 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4422 !strconcat(OpcodeStr,
4423 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4424 !strconcat(OpcodeStr,
4425 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4426 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4429 // Intrinsic operation, mem.
4430 def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
4431 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4433 !strconcat(OpcodeStr,
4434 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4435 !strconcat(OpcodeStr,
4436 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4438 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
4442 multiclass sse41_fp_binop_rm_avx<bits<8> opcss, bits<8> opcsd,
4444 // Intrinsic operation, reg.
4445 def SSr : SS4AIi8<opcss, MRMSrcReg,
4446 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4447 !strconcat(OpcodeStr,
4448 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4451 // Intrinsic operation, mem.
4452 def SSm : SS4AIi8<opcss, MRMSrcMem,
4453 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4454 !strconcat(OpcodeStr,
4455 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4458 // Intrinsic operation, reg.
4459 def SDr : SS4AIi8<opcsd, MRMSrcReg,
4460 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4461 !strconcat(OpcodeStr,
4462 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4465 // Intrinsic operation, mem.
4466 def SDm : SS4AIi8<opcsd, MRMSrcMem,
4467 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4468 !strconcat(OpcodeStr,
4469 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4473 // FP round - roundss, roundps, roundsd, roundpd
4474 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4476 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround",
4477 int_x86_sse41_round_ps, int_x86_sse41_round_pd>,
4479 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
4480 int_x86_sse41_round_ss, int_x86_sse41_round_sd,
4482 // Instructions for the assembler
4483 defm VROUND : sse41_fp_unop_rm_avx<0x08, 0x09, "vround">, VEX;
4484 defm VROUND : sse41_fp_binop_rm_avx<0x0A, 0x0B, "vround">, VEX_4V;
4487 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round",
4488 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
4489 let Constraints = "$src1 = $dst" in
4490 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
4491 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
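// For reference, assuming the standard ROUND{PS,PD,SS,SD} immediate encoding:
// bits 1:0 select the rounding mode (00 nearest, 01 down, 10 up, 11 truncate),
// bit 2 selects MXCSR.RC instead of the immediate mode when set, and bit 3
// suppresses precision (inexact) exceptions.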
4493 //===----------------------------------------------------------------------===//
4494 // SSE4.1 - Misc Instructions
4495 //===----------------------------------------------------------------------===//
4497 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
4498 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
4499 Intrinsic IntId128> {
4500 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4502 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4503 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
4504 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4506 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4509 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
4512 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4513 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
4514 int_x86_sse41_phminposuw>, VEX;
4515 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
4516 int_x86_sse41_phminposuw>;
4518 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
4519 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
4520 Intrinsic IntId128, bit Is2Addr = 1> {
4521 let isCommutable = 1 in
4522 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4523 (ins VR128:$src1, VR128:$src2),
4525 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4526 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4527 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
4528 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4529 (ins VR128:$src1, i128mem:$src2),
4531 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4532 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4534 (IntId128 VR128:$src1,
4535 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4538 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4539 let isCommutable = 0 in
4540 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
4542 defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
4544 defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
4546 defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
4548 defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
4550 defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
4552 defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
4554 defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
4556 defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
4558 defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
4560 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
4564 let Constraints = "$src1 = $dst" in {
4565 let isCommutable = 0 in
4566 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
4567 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
4568 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
4569 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
4570 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
4571 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
4572 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
4573 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
4574 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
4575 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
4576 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
4579 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
4580 (PCMPEQQrr VR128:$src1, VR128:$src2)>;
4581 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
4582 (PCMPEQQrm VR128:$src1, addr:$src2)>;
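// The two patterns above let the X86pcmpeqq node (produced when lowering
// vector integer comparisons) reuse the intrinsic-form PCMPEQQ instructions
// defined above.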
4584 /// SS48I_binop_rm - Simple SSE41 binary operator.
4585 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4586 ValueType OpVT, bit Is2Addr = 1> {
4587 let isCommutable = 1 in
4588 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4589 (ins VR128:$src1, VR128:$src2),
4591 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4592 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4593 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
4595 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4596 (ins VR128:$src1, i128mem:$src2),
4598 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4599 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4600 [(set VR128:$dst, (OpNode VR128:$src1,
4601 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
4605 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4606 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
4607 let Constraints = "$src1 = $dst" in
4608 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
4610 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
4611 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
4612 Intrinsic IntId128, bit Is2Addr = 1> {
4613 let isCommutable = 1 in
4614 def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4615 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4617 !strconcat(OpcodeStr,
4618 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4619 !strconcat(OpcodeStr,
4620 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4622 (IntId128 VR128:$src1, VR128:$src2, imm:$src3))]>,
4624 def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4625 (ins VR128:$src1, i128mem:$src2, i32i8imm:$src3),
4627 !strconcat(OpcodeStr,
4628 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4629 !strconcat(OpcodeStr,
4630 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4632 (IntId128 VR128:$src1,
4633 (bitconvert (memopv16i8 addr:$src2)), imm:$src3))]>,
4637 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4638 let isCommutable = 0 in {
4639 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
4641 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
4643 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
4645 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
4648 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
4650 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
4654 let Constraints = "$src1 = $dst" in {
4655 let isCommutable = 0 in {
4656 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps>;
4657 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd>;
4658 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw>;
4659 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw>;
4661 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps>;
4662 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd>;
4665 /// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
4666 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4667 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr> {
4668 def rr : I<opc, MRMSrcReg, (outs VR128:$dst),
4669 (ins VR128:$src1, VR128:$src2, VR128:$src3),
4670 !strconcat(OpcodeStr,
4671 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4672 [], SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4674 def rm : I<opc, MRMSrcMem, (outs VR128:$dst),
4675 (ins VR128:$src1, i128mem:$src2, VR128:$src3),
4676 !strconcat(OpcodeStr,
4677 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4678 [], SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4682 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd">;
4683 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps">;
4684 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb">;
4686 /// SS41I_ternary_int - SSE 4.1 ternary operator
4687 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
4688 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4689 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4690 (ins VR128:$src1, VR128:$src2),
4691 !strconcat(OpcodeStr,
4692 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4693 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
4696 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4697 (ins VR128:$src1, i128mem:$src2),
4698 !strconcat(OpcodeStr,
4699 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4702 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
4706 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
4707 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
4708 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
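// The non-AVX blendv forms above read the blend mask implicitly from XMM0
// (hence the Uses = [XMM0] above); the AVX forms defined earlier instead take
// the mask as a fourth register operand encoded in the immediate byte
// (VEX_I8IMM).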
4710 // ptest: we lower to this in X86ISelLowering, primarily from the Intel
4711 // intrinsic that corresponds to it.
4712 let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4713 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4714 "vptest\t{$src2, $src1|$src1, $src2}",
4715 [(set EFLAGS, (X86ptest VR128:$src1, VR128:$src2))]>,
4717 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
4718 "vptest\t{$src2, $src1|$src1, $src2}",
4719 [(set EFLAGS, (X86ptest VR128:$src1, (load addr:$src2)))]>,
4723 let Defs = [EFLAGS] in {
4724 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4725 "ptest \t{$src2, $src1|$src1, $src2}",
4726 [(set EFLAGS, (X86ptest VR128:$src1, VR128:$src2))]>,
4728 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
4729 "ptest \t{$src2, $src1|$src1, $src2}",
4730 [(set EFLAGS, (X86ptest VR128:$src1, (load addr:$src2)))]>,
4734 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4735 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4736 "vmovntdqa\t{$src, $dst|$dst, $src}",
4737 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4739 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4740 "movntdqa\t{$src, $dst|$dst, $src}",
4741 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4742 OpSize;
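// movntdqa (defined above) is a non-temporal (streaming) aligned 16-byte load;
// it is only matched from the corresponding intrinsic here, not from ordinary
// load nodes.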
4744 //===----------------------------------------------------------------------===//
4745 // SSE4.2 - Compare Instructions
4746 //===----------------------------------------------------------------------===//
4748 /// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
4749 multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
4750 Intrinsic IntId128, bit Is2Addr = 1> {
4751 def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
4752 (ins VR128:$src1, VR128:$src2),
4754 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4755 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4756 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
4758 def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
4759 (ins VR128:$src1, i128mem:$src2),
4761 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4762 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4764 (IntId128 VR128:$src1,
4765 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4768 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE42] in
4769 defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
4771 let Constraints = "$src1 = $dst" in
4772 defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
4774 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
4775 (PCMPGTQrr VR128:$src1, VR128:$src2)>;
4776 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
4777 (PCMPGTQrm VR128:$src1, addr:$src2)>;
4779 //===----------------------------------------------------------------------===//
4780 // SSE4.2 - String/text Processing Instructions
4781 //===----------------------------------------------------------------------===//
4783 // Packed Compare Implicit Length Strings, Return Mask
4784 let Defs = [EFLAGS], usesCustomInserter = 1 in {
4785 def PCMPISTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst),
4786 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4787 "#PCMPISTRM128rr PSEUDO!",
4788 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
4789 imm:$src3))]>, OpSize;
4790 def PCMPISTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst),
4791 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4792 "#PCMPISTRM128rm PSEUDO!",
4793 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
4794 VR128:$src1, (load addr:$src2), imm:$src3))]>, OpSize;
4797 let Defs = [XMM0, EFLAGS], isAsmParserOnly = 1,
4798 Predicates = [HasAVX, HasSSE42] in {
4799 def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4800 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4801 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4802 def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4803 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4804 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4807 let Defs = [XMM0, EFLAGS] in {
4808 def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4809 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4810 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4811 def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4812 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4813 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4816 // Packed Compare Explicit Length Strings, Return Mask
4817 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
4818 def PCMPESTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst),
4819 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4820 "#PCMPESTRM128rr PSEUDO!",
4822 (int_x86_sse42_pcmpestrm128
4823 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>, OpSize;
4825 def PCMPESTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst),
4826 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4827 "#PCMPESTRM128rm PSEUDO!",
4828 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4829 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>,
4833 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE42],
4834 Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4835 def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4836 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4837 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4838 def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4839 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4840 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4843 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4844 def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4845 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4846 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4847 def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4848 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4849 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4852 // Packed Compare Implicit Length Strings, Return Index
4853 let Defs = [ECX, EFLAGS] in {
4854 multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
4855 def rr : SS42AI<0x63, MRMSrcReg, (outs),
4856 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4857 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4858 [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
4859 (implicit EFLAGS)]>, OpSize;
4860 def rm : SS42AI<0x63, MRMSrcMem, (outs),
4861 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4862 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4863 [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
4864 (implicit EFLAGS)]>, OpSize;
4868 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE42] in {
4869 defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
4871 defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
4873 defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
4875 defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
4877 defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
4879 defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
4883 defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
4884 defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
4885 defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
4886 defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
4887 defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
4888 defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
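// The A/C/O/S/Z defms above all emit the same pcmpistri encoding; the separate
// defms exist so that each of the flag-reading string-compare intrinsics
// (which report individual EFLAGS bits of the compare) can be selected to its
// own instruction whose flags result is then consumed.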
4890 // Packed Compare Explicit Length Strings, Return Index
4891 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
4892 multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
4893 def rr : SS42AI<0x61, MRMSrcReg, (outs),
4894 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4895 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
4896 [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
4897 (implicit EFLAGS)]>, OpSize;
4898 def rm : SS42AI<0x61, MRMSrcMem, (outs),
4899 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4900 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
4902 (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
4903 (implicit EFLAGS)]>, OpSize;
4907 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE42] in {
4908 defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
4910 defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
4912 defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
4914 defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
4916 defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
4918 defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
4922 defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
4923 defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
4924 defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
4925 defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
4926 defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
4927 defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
4929 //===----------------------------------------------------------------------===//
4930 // SSE4.2 - CRC Instructions
4931 //===----------------------------------------------------------------------===//
4933 // No CRC instructions have AVX equivalents.
4935 // crc intrinsic instructions
4936 // This set of instructions comes only in rm form; the only difference is the
4937 // size of r and m.
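// Note: the crc32 instruction computes CRC-32C (the Castagnoli polynomial, as
// used by iSCSI/SCTP), not the CRC-32 polynomial used by zip/zlib.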
4938 let Constraints = "$src1 = $dst" in {
4939 def CRC32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
4940 (ins GR32:$src1, i8mem:$src2),
4941 "crc32{b} \t{$src2, $src1|$src1, $src2}",
4943 (int_x86_sse42_crc32_8 GR32:$src1,
4944 (load addr:$src2)))]>;
4945 def CRC32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
4946 (ins GR32:$src1, GR8:$src2),
4947 "crc32{b} \t{$src2, $src1|$src1, $src2}",
4949 (int_x86_sse42_crc32_8 GR32:$src1, GR8:$src2))]>;
4950 def CRC32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
4951 (ins GR32:$src1, i16mem:$src2),
4952 "crc32{w} \t{$src2, $src1|$src1, $src2}",
4954 (int_x86_sse42_crc32_16 GR32:$src1,
4955 (load addr:$src2)))]>,
4957 def CRC32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
4958 (ins GR32:$src1, GR16:$src2),
4959 "crc32{w} \t{$src2, $src1|$src1, $src2}",
4961 (int_x86_sse42_crc32_16 GR32:$src1, GR16:$src2))]>,
4963 def CRC32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
4964 (ins GR32:$src1, i32mem:$src2),
4965 "crc32{l} \t{$src2, $src1|$src1, $src2}",
4967 (int_x86_sse42_crc32_32 GR32:$src1,
4968 (load addr:$src2)))]>;
4969 def CRC32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
4970 (ins GR32:$src1, GR32:$src2),
4971 "crc32{l} \t{$src2, $src1|$src1, $src2}",
4973 (int_x86_sse42_crc32_32 GR32:$src1, GR32:$src2))]>;
4974 def CRC64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
4975 (ins GR64:$src1, i8mem:$src2),
4976 "crc32{b} \t{$src2, $src1|$src1, $src2}",
4978 (int_x86_sse42_crc64_8 GR64:$src1,
4979 (load addr:$src2)))]>,
4981 def CRC64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
4982 (ins GR64:$src1, GR8:$src2),
4983 "crc32{b} \t{$src2, $src1|$src1, $src2}",
4985 (int_x86_sse42_crc64_8 GR64:$src1, GR8:$src2))]>,
4987 def CRC64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
4988 (ins GR64:$src1, i64mem:$src2),
4989 "crc32{q} \t{$src2, $src1|$src1, $src2}",
4991 (int_x86_sse42_crc64_64 GR64:$src1,
4992 (load addr:$src2)))]>,
4994 def CRC64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
4995 (ins GR64:$src1, GR64:$src2),
4996 "crc32{q} \t{$src2, $src1|$src1, $src2}",
4998 (int_x86_sse42_crc64_64 GR64:$src1, GR64:$src2))]>,
5002 //===----------------------------------------------------------------------===//
5003 // AES-NI Instructions
5004 //===----------------------------------------------------------------------===//
5006 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
5007 Intrinsic IntId128, bit Is2Addr = 1> {
5008 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
5009 (ins VR128:$src1, VR128:$src2),
5011 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5012 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5013 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5015 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
5016 (ins VR128:$src1, i128mem:$src2),
5018 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5019 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5021 (IntId128 VR128:$src1,
5022 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5025 // Perform One Round of an AES Encryption/Decryption Flow
5026 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5027 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
5028 int_x86_aesni_aesenc, 0>, VEX_4V;
5029 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
5030 int_x86_aesni_aesenclast, 0>, VEX_4V;
5031 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
5032 int_x86_aesni_aesdec, 0>, VEX_4V;
5033 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
5034 int_x86_aesni_aesdeclast, 0>, VEX_4V;
5037 let Constraints = "$src1 = $dst" in {
5038 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
5039 int_x86_aesni_aesenc>;
5040 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
5041 int_x86_aesni_aesenclast>;
5042 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
5043 int_x86_aesni_aesdec>;
5044 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
5045 int_x86_aesni_aesdeclast>;
5048 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
5049 (AESENCrr VR128:$src1, VR128:$src2)>;
5050 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
5051 (AESENCrm VR128:$src1, addr:$src2)>;
5052 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
5053 (AESENCLASTrr VR128:$src1, VR128:$src2)>;
5054 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
5055 (AESENCLASTrm VR128:$src1, addr:$src2)>;
5056 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
5057 (AESDECrr VR128:$src1, VR128:$src2)>;
5058 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
5059 (AESDECrm VR128:$src1, addr:$src2)>;
5060 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
5061 (AESDECLASTrr VR128:$src1, VR128:$src2)>;
5062 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
5063 (AESDECLASTrm VR128:$src1, addr:$src2)>;
5065 // Perform the AES InvMixColumn Transformation
5066 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5067 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5069 "vaesimc\t{$src1, $dst|$dst, $src1}",
5071 (int_x86_aesni_aesimc VR128:$src1))]>,
5073 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5074 (ins i128mem:$src1),
5075 "vaesimc\t{$src1, $dst|$dst, $src1}",
5077 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5080 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5082 "aesimc\t{$src1, $dst|$dst, $src1}",
5084 (int_x86_aesni_aesimc VR128:$src1))]>,
5086 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5087 (ins i128mem:$src1),
5088 "aesimc\t{$src1, $dst|$dst, $src1}",
5090 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5093 // AES Round Key Generation Assist
5094 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5095 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5096 (ins VR128:$src1, i8imm:$src2),
5097 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5099 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5101 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5102 (ins i128mem:$src1, i8imm:$src2),
5103 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5105 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5109 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5110 (ins VR128:$src1, i8imm:$src2),
5111 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5113 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5115 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5116 (ins i128mem:$src1, i8imm:$src2),
5117 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5119 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),