1 //====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
17 //===----------------------------------------------------------------------===//
18 // SSE specific DAG Nodes.
19 //===----------------------------------------------------------------------===//
21 def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
22 SDTCisFP<0>, SDTCisInt<2> ]>;
23 def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
24 SDTCisFP<1>, SDTCisVT<3, i8>]>;
26 def X86fmin : SDNode<"X86ISD::FMIN", SDTFPBinOp>;
27 def X86fmax : SDNode<"X86ISD::FMAX", SDTFPBinOp>;
28 def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp,
29 [SDNPCommutative, SDNPAssociative]>;
30 def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp,
31 [SDNPCommutative, SDNPAssociative]>;
32 def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp,
33 [SDNPCommutative, SDNPAssociative]>;
34 def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
35 def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
36 def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
37 def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
38 def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
39 def X86pshufb : SDNode<"X86ISD::PSHUFB",
40 SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
42 def X86pextrb : SDNode<"X86ISD::PEXTRB",
43 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
44 def X86pextrw : SDNode<"X86ISD::PEXTRW",
45 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
46 def X86pinsrb : SDNode<"X86ISD::PINSRB",
47 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
48 SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
49 def X86pinsrw : SDNode<"X86ISD::PINSRW",
50 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
51 SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
52 def X86insrtps : SDNode<"X86ISD::INSERTPS",
53 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
54 SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
55 def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
56 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
57 def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
58 [SDNPHasChain, SDNPMayLoad]>;
59 def X86vshl : SDNode<"X86ISD::VSHL", SDTIntShiftOp>;
60 def X86vshr : SDNode<"X86ISD::VSRL", SDTIntShiftOp>;
61 def X86cmpps : SDNode<"X86ISD::CMPPS", SDTX86VFCMP>;
62 def X86cmppd : SDNode<"X86ISD::CMPPD", SDTX86VFCMP>;
63 def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
64 def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
65 def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
66 def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
67 def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
68 def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
69 def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
70 def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;
72 def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
75 def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
77 //===----------------------------------------------------------------------===//
78 // SSE Complex Patterns
79 //===----------------------------------------------------------------------===//
81 // These are 'extloads' from a scalar to the low element of a vector, zeroing
82 // the top elements. These are used for the SSE 'ss' and 'sd' instruction
84 def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
85 [SDNPHasChain, SDNPMayLoad]>;
86 def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
87 [SDNPHasChain, SDNPMayLoad]>;
89 def ssmem : Operand<v4f32> {
90 let PrintMethod = "printf32mem";
91 let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
92 let ParserMatchClass = X86MemAsmOperand;
94 def sdmem : Operand<v2f64> {
95 let PrintMethod = "printf64mem";
96 let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
97 let ParserMatchClass = X86MemAsmOperand;
100 //===----------------------------------------------------------------------===//
101 // SSE pattern fragments
102 //===----------------------------------------------------------------------===//
104 def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
105 def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
106 def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
107 def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
109 // FIXME: move this to a more appropriate place after all AVX is done.
110 def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
111 def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
112 def loadv8i32 : PatFrag<(ops node:$ptr), (v8i32 (load node:$ptr))>;
113 def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;
115 // Like 'store', but always requires vector alignment.
116 def alignedstore : PatFrag<(ops node:$val, node:$ptr),
117 (store node:$val, node:$ptr), [{
118 return cast<StoreSDNode>(N)->getAlignment() >= 16;
121 // Like 'load', but always requires vector alignment.
122 def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
123 return cast<LoadSDNode>(N)->getAlignment() >= 16;
126 def alignedloadfsf32 : PatFrag<(ops node:$ptr),
127 (f32 (alignedload node:$ptr))>;
128 def alignedloadfsf64 : PatFrag<(ops node:$ptr),
129 (f64 (alignedload node:$ptr))>;
130 def alignedloadv4f32 : PatFrag<(ops node:$ptr),
131 (v4f32 (alignedload node:$ptr))>;
132 def alignedloadv2f64 : PatFrag<(ops node:$ptr),
133 (v2f64 (alignedload node:$ptr))>;
134 def alignedloadv4i32 : PatFrag<(ops node:$ptr),
135 (v4i32 (alignedload node:$ptr))>;
136 def alignedloadv2i64 : PatFrag<(ops node:$ptr),
137 (v2i64 (alignedload node:$ptr))>;
139 // FIXME: move this to a more appropriate place after all AVX is done.
140 def alignedloadv8f32 : PatFrag<(ops node:$ptr),
141 (v8f32 (alignedload node:$ptr))>;
142 def alignedloadv4f64 : PatFrag<(ops node:$ptr),
143 (v4f64 (alignedload node:$ptr))>;
144 def alignedloadv8i32 : PatFrag<(ops node:$ptr),
145 (v8i32 (alignedload node:$ptr))>;
146 def alignedloadv4i64 : PatFrag<(ops node:$ptr),
147 (v4i64 (alignedload node:$ptr))>;
149 // Like 'load', but uses special alignment checks suitable for use in
150 // memory operands in most SSE instructions, which are required to
151 // be naturally aligned on some targets but not on others. If the subtarget
152 // allows unaligned accesses, match any load, though this may require
153 // setting a feature bit in the processor (on startup, for example).
154 // Opteron 10h and later implement such a feature.
155 def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
156 return Subtarget->hasVectorUAMem()
157 || cast<LoadSDNode>(N)->getAlignment() >= 16;
160 def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
161 def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
162 def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
163 def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
164 def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
165 def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
166 def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
168 // FIXME: move this to a more appropriate place after all AVX is done.
169 def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
170 def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
172 // SSSE3 uses MMX registers for some instructions. They aren't aligned on a
174 // FIXME: 8 byte alignment for mmx reads is not required
175 def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
176 return cast<LoadSDNode>(N)->getAlignment() >= 8;
179 def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;
180 def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
181 def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
182 def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;
185 // Like 'store', but requires the non-temporal bit to be set
186 def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
187 (st node:$val, node:$ptr), [{
188 if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
189 return ST->isNonTemporal();
193 def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
194 (st node:$val, node:$ptr), [{
195 if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
196 return ST->isNonTemporal() && !ST->isTruncatingStore() &&
197 ST->getAddressingMode() == ISD::UNINDEXED &&
198 ST->getAlignment() >= 16;
202 def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
203 (st node:$val, node:$ptr), [{
204 if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
205 return ST->isNonTemporal() &&
206 ST->getAlignment() < 16;
210 def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
211 def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
212 def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
213 def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
214 def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
215 def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
217 def vzmovl_v2i64 : PatFrag<(ops node:$src),
218 (bitconvert (v2i64 (X86vzmovl
219 (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
220 def vzmovl_v4i32 : PatFrag<(ops node:$src),
221 (bitconvert (v4i32 (X86vzmovl
222 (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;
224 def vzload_v2i64 : PatFrag<(ops node:$src),
225 (bitconvert (v2i64 (X86vzload node:$src)))>;
228 def fp32imm0 : PatLeaf<(f32 fpimm), [{
229 return N->isExactlyValue(+0.0);
232 // BYTE_imm - Transform bit immediates into byte immediates.
233 def BYTE_imm : SDNodeXForm<imm, [{
234 // Transformation function: imm >> 3
235 return getI32Imm(N->getZExtValue() >> 3);
238 // SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
240 def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
241 return getI8Imm(X86::getShuffleSHUFImmediate(N));
244 // SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
246 def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
247 return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
250 // SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
252 def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
253 return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
256 // SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
258 def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
259 return getI8Imm(X86::getShufflePALIGNRImmediate(N));
262 def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
263 (vector_shuffle node:$lhs, node:$rhs), [{
264 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
265 return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
268 def movddup : PatFrag<(ops node:$lhs, node:$rhs),
269 (vector_shuffle node:$lhs, node:$rhs), [{
270 return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
273 def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
274 (vector_shuffle node:$lhs, node:$rhs), [{
275 return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
278 def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
279 (vector_shuffle node:$lhs, node:$rhs), [{
280 return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
283 def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
284 (vector_shuffle node:$lhs, node:$rhs), [{
285 return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
288 def movlp : PatFrag<(ops node:$lhs, node:$rhs),
289 (vector_shuffle node:$lhs, node:$rhs), [{
290 return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
293 def movl : PatFrag<(ops node:$lhs, node:$rhs),
294 (vector_shuffle node:$lhs, node:$rhs), [{
295 return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
298 def movshdup : PatFrag<(ops node:$lhs, node:$rhs),
299 (vector_shuffle node:$lhs, node:$rhs), [{
300 return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
303 def movsldup : PatFrag<(ops node:$lhs, node:$rhs),
304 (vector_shuffle node:$lhs, node:$rhs), [{
305 return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
308 def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
309 (vector_shuffle node:$lhs, node:$rhs), [{
310 return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
313 def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
314 (vector_shuffle node:$lhs, node:$rhs), [{
315 return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
318 def unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
319 (vector_shuffle node:$lhs, node:$rhs), [{
320 return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
323 def unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
324 (vector_shuffle node:$lhs, node:$rhs), [{
325 return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
328 def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
329 (vector_shuffle node:$lhs, node:$rhs), [{
330 return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
331 }], SHUFFLE_get_shuf_imm>;
333 def shufp : PatFrag<(ops node:$lhs, node:$rhs),
334 (vector_shuffle node:$lhs, node:$rhs), [{
335 return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
336 }], SHUFFLE_get_shuf_imm>;
338 def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
339 (vector_shuffle node:$lhs, node:$rhs), [{
340 return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
341 }], SHUFFLE_get_pshufhw_imm>;
343 def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
344 (vector_shuffle node:$lhs, node:$rhs), [{
345 return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
346 }], SHUFFLE_get_pshuflw_imm>;
348 def palign : PatFrag<(ops node:$lhs, node:$rhs),
349 (vector_shuffle node:$lhs, node:$rhs), [{
350 return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
351 }], SHUFFLE_get_palign_imm>;
353 //===----------------------------------------------------------------------===//
354 // SSE scalar FP Instructions
355 //===----------------------------------------------------------------------===//
357 // CMOV* - Used to implement the SSE SELECT DAG operation. Expanded after
358 // instruction selection into a branch sequence.
359 let Uses = [EFLAGS], usesCustomInserter = 1 in {
360 def CMOV_FR32 : I<0, Pseudo,
361 (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
362 "#CMOV_FR32 PSEUDO!",
363 [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
365 def CMOV_FR64 : I<0, Pseudo,
366 (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
367 "#CMOV_FR64 PSEUDO!",
368 [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
370 def CMOV_V4F32 : I<0, Pseudo,
371 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
372 "#CMOV_V4F32 PSEUDO!",
374 (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
376 def CMOV_V2F64 : I<0, Pseudo,
377 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
378 "#CMOV_V2F64 PSEUDO!",
380 (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
382 def CMOV_V2I64 : I<0, Pseudo,
383 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
384 "#CMOV_V2I64 PSEUDO!",
386 (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
390 //===----------------------------------------------------------------------===//
391 // SSE 1 & 2 Instructions Classes
392 //===----------------------------------------------------------------------===//
394 /// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
395 multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
396 RegisterClass RC, X86MemOperand x86memop,
398 let isCommutable = 1 in {
399 def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
401 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
402 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
403 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
405 def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
407 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
408 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
409 [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
412 /// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
413 multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
414 string asm, string SSEVer, string FPSizeStr,
415 Operand memopr, ComplexPattern mem_cpat,
417 def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
419 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
420 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
421 [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
422 !strconcat(SSEVer, !strconcat("_",
423 !strconcat(OpcodeStr, FPSizeStr))))
424 RC:$src1, RC:$src2))]>;
425 def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
427 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
428 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
429 [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
430 !strconcat(SSEVer, !strconcat("_",
431 !strconcat(OpcodeStr, FPSizeStr))))
432 RC:$src1, mem_cpat:$src2))]>;
435 /// sse12_fp_packed - SSE 1 & 2 packed instructions class
436 multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
437 RegisterClass RC, ValueType vt,
438 X86MemOperand x86memop, PatFrag mem_frag,
439 Domain d, bit Is2Addr = 1> {
440 let isCommutable = 1 in
441 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
443 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
444 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
445 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
447 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
449 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
450 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
451 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
454 /// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
455 multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
456 string OpcodeStr, X86MemOperand x86memop,
457 list<dag> pat_rr, list<dag> pat_rm,
459 let isCommutable = 1 in
460 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
462 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
463 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
465 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
467 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
468 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
472 /// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
473 multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
474 string asm, string SSEVer, string FPSizeStr,
475 X86MemOperand x86memop, PatFrag mem_frag,
476 Domain d, bit Is2Addr = 1> {
477 def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
479 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
480 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
481 [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
482 !strconcat(SSEVer, !strconcat("_",
483 !strconcat(OpcodeStr, FPSizeStr))))
484 RC:$src1, RC:$src2))], d>;
485 def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
487 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
488 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
489 [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
490 !strconcat(SSEVer, !strconcat("_",
491 !strconcat(OpcodeStr, FPSizeStr))))
492 RC:$src1, (mem_frag addr:$src2)))], d>;
495 //===----------------------------------------------------------------------===//
496 // SSE 1 & 2 - Move Instructions
497 //===----------------------------------------------------------------------===//
499 class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
500 SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
501 [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;
503 // Loading from memory automatically zeroing upper bits.
504 class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
505 PatFrag mem_pat, string OpcodeStr> :
506 SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
507 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
508 [(set RC:$dst, (mem_pat addr:$src))]>;
510 // Move Instructions. Register-to-register movss/movsd is not used for FR32/64
511 // register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
512 // is used instead. Register-to-register movss/movsd is not modeled as an
513 // INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
514 // in terms of a copy, and just mentioned, we don't use movss/movsd for copies.
515 let isAsmParserOnly = 1 in {
516 def VMOVSSrr : sse12_move_rr<FR32, v4f32,
517 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
518 def VMOVSDrr : sse12_move_rr<FR64, v2f64,
519 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;
521 let canFoldAsLoad = 1, isReMaterializable = 1 in {
522 def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;
524 let AddedComplexity = 20 in
525 def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
529 let Constraints = "$src1 = $dst" in {
530 def MOVSSrr : sse12_move_rr<FR32, v4f32,
531 "movss\t{$src2, $dst|$dst, $src2}">, XS;
532 def MOVSDrr : sse12_move_rr<FR64, v2f64,
533 "movsd\t{$src2, $dst|$dst, $src2}">, XD;
536 let canFoldAsLoad = 1, isReMaterializable = 1 in {
537 def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;
539 let AddedComplexity = 20 in
540 def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
543 let AddedComplexity = 15 in {
544 // Extract the low 32-bit value from one vector and insert it into another.
545 def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
546 (MOVSSrr (v4f32 VR128:$src1),
547 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
548 // Extract the low 64-bit value from one vector and insert it into another.
549 def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
550 (MOVSDrr (v2f64 VR128:$src1),
551 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
554 // Implicitly promote a 32-bit scalar to a vector.
555 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
556 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
557 // Implicitly promote a 64-bit scalar to a vector.
558 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
559 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
561 let AddedComplexity = 20 in {
562 // MOVSSrm zeros the high parts of the register; represent this
563 // with SUBREG_TO_REG.
564 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
565 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
566 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
567 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
568 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
569 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
570 // MOVSDrm zeros the high parts of the register; represent this
571 // with SUBREG_TO_REG.
572 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
573 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
574 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
575 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
576 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
577 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
578 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
579 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
580 def : Pat<(v2f64 (X86vzload addr:$src)),
581 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
584 // Store scalar value to memory.
585 def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
586 "movss\t{$src, $dst|$dst, $src}",
587 [(store FR32:$src, addr:$dst)]>;
588 def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
589 "movsd\t{$src, $dst|$dst, $src}",
590 [(store FR64:$src, addr:$dst)]>;
592 let isAsmParserOnly = 1 in {
593 def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
594 "movss\t{$src, $dst|$dst, $src}",
595 [(store FR32:$src, addr:$dst)]>, XS, VEX_4V;
596 def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
597 "movsd\t{$src, $dst|$dst, $src}",
598 [(store FR64:$src, addr:$dst)]>, XD, VEX_4V;
601 // Extract and store.
602 def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
605 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
606 def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
609 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
611 // Move Aligned/Unaligned floating point values
612 multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
613 X86MemOperand x86memop, PatFrag ld_frag,
614 string asm, Domain d,
615 bit IsReMaterializable = 1> {
616 let neverHasSideEffects = 1 in
617 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
618 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
619 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
620 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
621 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
622 [(set RC:$dst, (ld_frag addr:$src))], d>;
625 let isAsmParserOnly = 1 in {
626 defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
627 "movaps", SSEPackedSingle>, VEX;
628 defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
629 "movapd", SSEPackedDouble>, OpSize, VEX;
630 defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
631 "movups", SSEPackedSingle>, VEX;
632 defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
633 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
635 defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
636 "movaps", SSEPackedSingle>, VEX;
637 defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
638 "movapd", SSEPackedDouble>, OpSize, VEX;
639 defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
640 "movups", SSEPackedSingle>, VEX;
641 defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
642 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
644 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
645 "movaps", SSEPackedSingle>, TB;
646 defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
647 "movapd", SSEPackedDouble>, TB, OpSize;
648 defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
649 "movups", SSEPackedSingle>, TB;
650 defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
651 "movupd", SSEPackedDouble, 0>, TB, OpSize;
653 let isAsmParserOnly = 1 in {
654 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
655 "movaps\t{$src, $dst|$dst, $src}",
656 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
657 def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
658 "movapd\t{$src, $dst|$dst, $src}",
659 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
660 def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
661 "movups\t{$src, $dst|$dst, $src}",
662 [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
663 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
664 "movupd\t{$src, $dst|$dst, $src}",
665 [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
666 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
667 "movaps\t{$src, $dst|$dst, $src}",
668 [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
669 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
670 "movapd\t{$src, $dst|$dst, $src}",
671 [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
672 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
673 "movups\t{$src, $dst|$dst, $src}",
674 [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
675 def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
676 "movupd\t{$src, $dst|$dst, $src}",
677 [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
679 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
680 "movaps\t{$src, $dst|$dst, $src}",
681 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
682 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
683 "movapd\t{$src, $dst|$dst, $src}",
684 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
685 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
686 "movups\t{$src, $dst|$dst, $src}",
687 [(store (v4f32 VR128:$src), addr:$dst)]>;
688 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
689 "movupd\t{$src, $dst|$dst, $src}",
690 [(store (v2f64 VR128:$src), addr:$dst)]>;
692 // Intrinsic forms of MOVUPS/D load and store
693 let isAsmParserOnly = 1 in {
694 let canFoldAsLoad = 1, isReMaterializable = 1 in
695 def VMOVUPSrm_Int : VPSI<0x10, MRMSrcMem, (outs VR128:$dst),
697 "movups\t{$src, $dst|$dst, $src}",
698 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>, VEX;
699 def VMOVUPDrm_Int : VPDI<0x10, MRMSrcMem, (outs VR128:$dst),
701 "movupd\t{$src, $dst|$dst, $src}",
702 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>, VEX;
703 def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
704 (ins f128mem:$dst, VR128:$src),
705 "movups\t{$src, $dst|$dst, $src}",
706 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
707 def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
708 (ins f128mem:$dst, VR128:$src),
709 "movupd\t{$src, $dst|$dst, $src}",
710 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
712 let canFoldAsLoad = 1, isReMaterializable = 1 in
713 def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
714 "movups\t{$src, $dst|$dst, $src}",
715 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
716 def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
717 "movupd\t{$src, $dst|$dst, $src}",
718 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
720 def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
721 "movups\t{$src, $dst|$dst, $src}",
722 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
723 def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
724 "movupd\t{$src, $dst|$dst, $src}",
725 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
727 // Move Low/High packed floating point values
728 multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
729 PatFrag mov_frag, string base_opc,
731 def PSrm : PI<opc, MRMSrcMem,
732 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
733 !strconcat(!strconcat(base_opc,"s"), asm_opr),
736 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
737 SSEPackedSingle>, TB;
739 def PDrm : PI<opc, MRMSrcMem,
740 (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
741 !strconcat(!strconcat(base_opc,"d"), asm_opr),
742 [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
743 (scalar_to_vector (loadf64 addr:$src2)))))],
744 SSEPackedDouble>, TB, OpSize;
747 let isAsmParserOnly = 1, AddedComplexity = 20 in {
748 defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
749 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
750 defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
751 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
753 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
754 defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
755 "\t{$src2, $dst|$dst, $src2}">;
756 defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
757 "\t{$src2, $dst|$dst, $src2}">;
760 let isAsmParserOnly = 1 in {
761 def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
762 "movlps\t{$src, $dst|$dst, $src}",
763 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
764 (iPTR 0))), addr:$dst)]>, VEX;
765 def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
766 "movlpd\t{$src, $dst|$dst, $src}",
767 [(store (f64 (vector_extract (v2f64 VR128:$src),
768 (iPTR 0))), addr:$dst)]>, VEX;
770 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
771 "movlps\t{$src, $dst|$dst, $src}",
772 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
773 (iPTR 0))), addr:$dst)]>;
774 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
775 "movlpd\t{$src, $dst|$dst, $src}",
776 [(store (f64 (vector_extract (v2f64 VR128:$src),
777 (iPTR 0))), addr:$dst)]>;
779 // v2f64 extract element 1 is always custom lowered to unpack high to low
780 // and extract element 0 so the non-store version isn't too horrible.
781 let isAsmParserOnly = 1 in {
782 def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
783 "movhps\t{$src, $dst|$dst, $src}",
784 [(store (f64 (vector_extract
785 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
786 (undef)), (iPTR 0))), addr:$dst)]>,
788 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
789 "movhpd\t{$src, $dst|$dst, $src}",
790 [(store (f64 (vector_extract
791 (v2f64 (unpckh VR128:$src, (undef))),
792 (iPTR 0))), addr:$dst)]>,
795 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
796 "movhps\t{$src, $dst|$dst, $src}",
797 [(store (f64 (vector_extract
798 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
799 (undef)), (iPTR 0))), addr:$dst)]>;
800 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
801 "movhpd\t{$src, $dst|$dst, $src}",
802 [(store (f64 (vector_extract
803 (v2f64 (unpckh VR128:$src, (undef))),
804 (iPTR 0))), addr:$dst)]>;
806 let isAsmParserOnly = 1, AddedComplexity = 20 in {
807 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
808 (ins VR128:$src1, VR128:$src2),
809 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
811 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
813 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
814 (ins VR128:$src1, VR128:$src2),
815 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
817 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
820 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
821 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
822 (ins VR128:$src1, VR128:$src2),
823 "movlhps\t{$src2, $dst|$dst, $src2}",
825 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
826 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
827 (ins VR128:$src1, VR128:$src2),
828 "movhlps\t{$src2, $dst|$dst, $src2}",
830 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
833 def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
834 (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
835 let AddedComplexity = 20 in {
836 def : Pat<(v4f32 (movddup VR128:$src, (undef))),
837 (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
838 def : Pat<(v2i64 (movddup VR128:$src, (undef))),
839 (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
842 //===----------------------------------------------------------------------===//
843 // SSE 1 & 2 - Conversion Instructions
844 //===----------------------------------------------------------------------===//
846 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
847 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
849 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
850 [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
851 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
852 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
855 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
856 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
857 string asm, Domain d> {
858 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
859 [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
860 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
861 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
864 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
865 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
867 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
869 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
870 (ins DstRC:$src1, x86memop:$src), asm, []>;
873 let isAsmParserOnly = 1 in {
874 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
875 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
876 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
877 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
878 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
879 "cvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}">, XS,
881 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
882 "cvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}">, XD,
886 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
887 "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
888 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
889 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
890 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
891 "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
892 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
893 "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
895 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
896 // and/or XMM operand(s).
897 multiclass sse12_cvt_pint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
898 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
899 string asm, Domain d> {
900 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
901 [(set DstRC:$dst, (Int SrcRC:$src))], d>;
902 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
903 [(set DstRC:$dst, (Int (ld_frag addr:$src)))], d>;
906 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
907 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
909 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
910 [(set DstRC:$dst, (Int SrcRC:$src))]>;
911 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
912 [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
915 multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC,
916 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
917 PatFrag ld_frag, string asm, Domain d> {
918 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
919 asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))], d>;
920 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst),
921 (ins DstRC:$src1, x86memop:$src2), asm,
922 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))], d>;
925 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
926 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
927 PatFrag ld_frag, string asm> {
928 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
929 asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
930 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
931 (ins DstRC:$src1, x86memop:$src2), asm,
932 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
935 let isAsmParserOnly = 1 in {
936 defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
937 f32mem, load, "cvtss2si\t{$src, $dst|$dst, $src}">, XS,
939 defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
940 f128mem, load, "cvtsd2si\t{$src, $dst|$dst, $src}">, XD,
943 defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
944 f32mem, load, "cvtss2si\t{$src, $dst|$dst, $src}">, XS;
945 defm Int_CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
946 f128mem, load, "cvtsd2si\t{$src, $dst|$dst, $src}">, XD;
949 let Constraints = "$src1 = $dst" in {
950 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
951 int_x86_sse_cvtsi2ss, i32mem, loadi32,
952 "cvtsi2ss\t{$src2, $dst|$dst, $src2}">, XS;
953 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
954 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
955 "cvtsi2ss\t{$src2, $dst|$dst, $src2}">, XD;
958 // Instructions below don't have an AVX form.
959 defm Int_CVTPS2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi,
960 f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}",
961 SSEPackedSingle>, TB;
962 defm Int_CVTPD2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtpd2pi,
963 f128mem, memop, "cvtpd2pi\t{$src, $dst|$dst, $src}",
964 SSEPackedDouble>, TB, OpSize;
965 defm Int_CVTTPS2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttps2pi,
966 f64mem, load, "cvttps2pi\t{$src, $dst|$dst, $src}",
967 SSEPackedSingle>, TB;
968 defm Int_CVTTPD2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttpd2pi,
969 f128mem, memop, "cvttpd2pi\t{$src, $dst|$dst, $src}",
970 SSEPackedDouble>, TB, OpSize;
971 defm Int_CVTPI2PD : sse12_cvt_pint<0x2A, VR64, VR128, int_x86_sse_cvtpi2pd,
972 i64mem, load, "cvtpi2pd\t{$src, $dst|$dst, $src}",
973 SSEPackedDouble>, TB, OpSize;
974 let Constraints = "$src1 = $dst" in {
975 defm Int_CVTPI2PS : sse12_cvt_pint_3addr<0x2A, VR64, VR128,
976 int_x86_sse_cvtpi2ps,
977 i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
978 SSEPackedSingle>, TB;
983 // Aliases for intrinsics
984 let isAsmParserOnly = 1, Pattern = []<dag> in {
985 defm Int_VCVTTSS2SI : sse12_cvt_sint_3addr<0x2C, VR128, GR32,
986 int_x86_sse_cvttss2si, f32mem, load,
987 "cvttss2si\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS;
988 defm Int_VCVTTSD2SI : sse12_cvt_sint_3addr<0x2C, VR128, GR32,
989 int_x86_sse2_cvttsd2si, f128mem, load,
990 "cvttss2si\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD;
992 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
993 f32mem, load, "cvttss2si\t{$src, $dst|$dst, $src}">,
995 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
996 f128mem, load, "cvttss2si\t{$src, $dst|$dst, $src}">,
999 let isAsmParserOnly = 1, Pattern = []<dag> in {
1000 defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
1001 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
1002 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, f128mem, load,
1003 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1004 SSEPackedSingle>, TB, VEX;
1006 let Pattern = []<dag> in {
1007 defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
1008 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
1009 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, f128mem, load /*dummy*/,
1010 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1011 SSEPackedSingle>, TB; /* PD SSE3 form is avaiable */
1016 // Convert scalar double to scalar single
1017 let isAsmParserOnly = 1 in {
1018 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
1019 (ins FR64:$src1, FR64:$src2),
1020 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
1022 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
1023 (ins FR64:$src1, f64mem:$src2),
1024 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1025 []>, XD, Requires<[HasAVX, HasSSE2, OptForSize]>, VEX_4V;
1027 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
1028 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1029 [(set FR32:$dst, (fround FR64:$src))]>;
1030 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
1031 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1032 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
1033 Requires<[HasSSE2, OptForSize]>;
1035 let isAsmParserOnly = 1 in
1036 defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
1037 int_x86_sse2_cvtsd2ss, f64mem, load,
1038 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}">,
1040 let Constraints = "$src1 = $dst" in
1041 defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
1042 int_x86_sse2_cvtsd2ss, f64mem, load,
1043 "cvtsd2ss\t{$src2, $dst|$dst, $src2}">, XS;
1045 // Convert scalar single to scalar double
1046 let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
1047 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
1048 (ins FR32:$src1, FR32:$src2),
1049 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1050 []>, XS, Requires<[HasAVX, HasSSE2]>, VEX_4V;
1051 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
1052 (ins FR32:$src1, f32mem:$src2),
1053 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1054 []>, XS, VEX_4V, Requires<[HasAVX, HasSSE2, OptForSize]>;
1056 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
1057 "cvtss2sd\t{$src, $dst|$dst, $src}",
1058 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
1059 Requires<[HasSSE2]>;
1060 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
1061 "cvtss2sd\t{$src, $dst|$dst, $src}",
1062 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
1063 Requires<[HasSSE2, OptForSize]>;
1065 let isAsmParserOnly = 1 in {
1066 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
1067 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1068 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1069 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1070 VR128:$src2))]>, XS, VEX_4V,
1071 Requires<[HasAVX, HasSSE2]>;
1072 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
1073 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
1074 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1075 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1076 (load addr:$src2)))]>, XS, VEX_4V,
1077 Requires<[HasAVX, HasSSE2]>;
1079 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
1080 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
1081 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1082 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1083 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1084 VR128:$src2))]>, XS,
1085 Requires<[HasSSE2]>;
1086 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
1087 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
1088 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1089 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1090 (load addr:$src2)))]>, XS,
1091 Requires<[HasSSE2]>;
1094 def : Pat<(extloadf32 addr:$src),
1095 (CVTSS2SDrr (MOVSSrm addr:$src))>,
1096 Requires<[HasSSE2, OptForSpeed]>;
1098 // Convert doubleword to packed single/double fp
1099 let isAsmParserOnly = 1 in { // SSE2 instructions without OpSize prefix
1100 def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1101 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1102 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
1103 TB, VEX, Requires<[HasAVX, HasSSE2]>;
1104 def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1105 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1106 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
1107 (bitconvert (memopv2i64 addr:$src))))]>,
1108 TB, VEX, Requires<[HasAVX, HasSSE2]>;
1110 def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1111 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1112 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
1113 TB, Requires<[HasSSE2]>;
1114 def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1115 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1116 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
1117 (bitconvert (memopv2i64 addr:$src))))]>,
1118 TB, Requires<[HasSSE2]>;
1120 // FIXME: why the non-intrinsic version is described as SSE3?
1121 let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
1122 def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1123 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
1124 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
1125 XS, VEX, Requires<[HasAVX, HasSSE2]>;
1126 def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1127 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
1128 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
1129 (bitconvert (memopv2i64 addr:$src))))]>,
1130 XS, VEX, Requires<[HasAVX, HasSSE2]>;
1132 def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1133 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1134 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
1135 XS, Requires<[HasSSE2]>;
1136 def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1137 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1138 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
1139 (bitconvert (memopv2i64 addr:$src))))]>,
1140 XS, Requires<[HasSSE2]>;
1142 // Convert packed single/double fp to doubleword
1143 let isAsmParserOnly = 1 in {
1144 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1145 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1146 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1147 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1149 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1150 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
1151 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1152 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
1154 let isAsmParserOnly = 1 in {
1155 def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1156 "cvtps2dq\t{$src, $dst|$dst, $src}",
1157 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
1159 def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
1161 "cvtps2dq\t{$src, $dst|$dst, $src}",
1162 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
1163 (memop addr:$src)))]>, VEX;
1165 def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1166 "cvtps2dq\t{$src, $dst|$dst, $src}",
1167 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
1168 def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1169 "cvtps2dq\t{$src, $dst|$dst, $src}",
1170 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
1171 (memop addr:$src)))]>;
1173 let isAsmParserOnly = 1 in { // SSE2 packed instructions with XD prefix
1174 def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1175 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
1176 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1177 XD, VEX, Requires<[HasAVX, HasSSE2]>;
1178 def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1179 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
1180 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
1181 (memop addr:$src)))]>,
1182 XD, VEX, Requires<[HasAVX, HasSSE2]>;
1184 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1185 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1186 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1187 XD, Requires<[HasSSE2]>;
1188 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1189 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1190 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
1191 (memop addr:$src)))]>,
1192 XD, Requires<[HasSSE2]>;
1195 // Convert with truncation packed single/double fp to doubleword
1196 let isAsmParserOnly = 1 in { // SSE2 packed instructions with XS prefix
1197 def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1198 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1199 def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1200 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1202 def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1203 "cvttps2dq\t{$src, $dst|$dst, $src}", []>;
1204 def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1205 "cvttps2dq\t{$src, $dst|$dst, $src}", []>;
1208 let isAsmParserOnly = 1 in {
1209 def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1210 "vcvttps2dq\t{$src, $dst|$dst, $src}",
1212 (int_x86_sse2_cvttps2dq VR128:$src))]>,
1213 XS, VEX, Requires<[HasAVX, HasSSE2]>;
1214 def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1215 "vcvttps2dq\t{$src, $dst|$dst, $src}",
1216 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
1217 (memop addr:$src)))]>,
1218 XS, VEX, Requires<[HasAVX, HasSSE2]>;
1220 def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1221 "cvttps2dq\t{$src, $dst|$dst, $src}",
1223 (int_x86_sse2_cvttps2dq VR128:$src))]>,
1224 XS, Requires<[HasSSE2]>;
1225 def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1226 "cvttps2dq\t{$src, $dst|$dst, $src}",
1227 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
1228 (memop addr:$src)))]>,
1229 XS, Requires<[HasSSE2]>;
1231 let isAsmParserOnly = 1 in {
1232 def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
1234 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1235 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
1237 def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
1239 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1240 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1241 (memop addr:$src)))]>, VEX;
1243 def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1244 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1245 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
1246 def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
1247 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1248 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1249 (memop addr:$src)))]>;
1251 // Convert packed single to packed double
1252 let isAsmParserOnly = 1 in { // SSE2 instructions without OpSize prefix
1253 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1254 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX,
1256 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1257 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX,
1260 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1261 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
1262 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1263 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
1265 let isAsmParserOnly = 1 in {
1266 def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1267 "cvtps2pd\t{$src, $dst|$dst, $src}",
1268 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1269 VEX, Requires<[HasAVX, HasSSE2]>;
1270 def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1271 "cvtps2pd\t{$src, $dst|$dst, $src}",
1272 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1273 (load addr:$src)))]>,
1274 VEX, Requires<[HasAVX, HasSSE2]>;
1276 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1277 "cvtps2pd\t{$src, $dst|$dst, $src}",
1278 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1279 TB, Requires<[HasSSE2]>;
1280 def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1281 "cvtps2pd\t{$src, $dst|$dst, $src}",
1282 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1283 (load addr:$src)))]>,
1284 TB, Requires<[HasSSE2]>;
1286 // Convert packed double to packed single
1287 let isAsmParserOnly = 1 in {
1288 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1289 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1290 // FIXME: the memory form of this instruction should described using
1291 // use extra asm syntax
1293 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1294 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1295 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1296 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1299 let isAsmParserOnly = 1 in {
1300 def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1301 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1302 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1303 def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
1305 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1306 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1307 (memop addr:$src)))]>;
1309 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1310 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1311 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1312 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1313 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1314 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1315 (memop addr:$src)))]>;
1317 //===----------------------------------------------------------------------===//
1318 // SSE 1 & 2 - Compare Instructions
1319 //===----------------------------------------------------------------------===//
1321 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
1322 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
1323 string asm, string asm_alt> {
1324 def rr : SIi8<0xC2, MRMSrcReg,
1325 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
1328 def rm : SIi8<0xC2, MRMSrcMem,
1329 (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
1331 // Accept explicit immediate argument form instead of comparison code.
1332 let isAsmParserOnly = 1 in {
1333 def rr_alt : SIi8<0xC2, MRMSrcReg,
1334 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1337 def rm_alt : SIi8<0xC2, MRMSrcMem,
1338 (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
1343 let neverHasSideEffects = 1, isAsmParserOnly = 1 in {
1344 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
1345 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1346 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1348 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
1349 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1350 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1354 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1355 defm CMPSS : sse12_cmp_scalar<FR32, f32mem,
1356 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
1357 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}">, XS;
1358 defm CMPSD : sse12_cmp_scalar<FR64, f64mem,
1359 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
1360 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}">, XD;
1363 multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
1364 Intrinsic Int, string asm> {
1365 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1366 (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
1367 [(set VR128:$dst, (Int VR128:$src1,
1368 VR128:$src, imm:$cc))]>;
1369 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1370 (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
1371 [(set VR128:$dst, (Int VR128:$src1,
1372 (load addr:$src), imm:$cc))]>;
1375 // Aliases to match intrinsics which expect XMM operand(s).
1376 let isAsmParserOnly = 1 in {
1377 defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1378 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
1380 defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1381 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
1384 let Constraints = "$src1 = $dst" in {
1385 defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1386 "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
1387 defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1388 "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
1392 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
1393 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1394 ValueType vt, X86MemOperand x86memop,
1395 PatFrag ld_frag, string OpcodeStr, Domain d> {
1396 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1397 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1398 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
1399 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1400 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1401 [(set EFLAGS, (OpNode (vt RC:$src1),
1402 (ld_frag addr:$src2)))], d>;
1405 let Defs = [EFLAGS] in {
1406 let isAsmParserOnly = 1 in {
1407 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1408 "ucomiss", SSEPackedSingle>, VEX;
1409 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1410 "ucomisd", SSEPackedDouble>, OpSize, VEX;
1411 let Pattern = []<dag> in {
1412 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1413 "comiss", SSEPackedSingle>, VEX;
1414 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1415 "comisd", SSEPackedDouble>, OpSize, VEX;
1418 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1419 load, "ucomiss", SSEPackedSingle>, VEX;
1420 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1421 load, "ucomisd", SSEPackedDouble>, OpSize, VEX;
1423 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
1424 load, "comiss", SSEPackedSingle>, VEX;
1425 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
1426 load, "comisd", SSEPackedDouble>, OpSize, VEX;
1428 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1429 "ucomiss", SSEPackedSingle>, TB;
1430 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1431 "ucomisd", SSEPackedDouble>, TB, OpSize;
1433 let Pattern = []<dag> in {
1434 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1435 "comiss", SSEPackedSingle>, TB;
1436 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1437 "comisd", SSEPackedDouble>, TB, OpSize;
1440 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1441 load, "ucomiss", SSEPackedSingle>, TB;
1442 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1443 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
1445 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
1446 "comiss", SSEPackedSingle>, TB;
1447 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
1448 "comisd", SSEPackedDouble>, TB, OpSize;
1449 } // Defs = [EFLAGS]
1451 // sse12_cmp_packed - sse 1 & 2 compared packed instructions
1452 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1453 Intrinsic Int, string asm, string asm_alt,
1455 def rri : PIi8<0xC2, MRMSrcReg,
1456 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1457 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1458 def rmi : PIi8<0xC2, MRMSrcMem,
1459 (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
1460 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1461 // Accept explicit immediate argument form instead of comparison code.
1462 let isAsmParserOnly = 1 in {
1463 def rri_alt : PIi8<0xC2, MRMSrcReg,
1464 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1466 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1467 (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
1472 let isAsmParserOnly = 1 in {
1473 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1474 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1475 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1476 SSEPackedSingle>, VEX_4V;
1477 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1478 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1479 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1480 SSEPackedDouble>, OpSize, VEX_4V;
1482 let Constraints = "$src1 = $dst" in {
1483 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1484 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1485 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1486 SSEPackedSingle>, TB;
1487 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1488 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1489 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1490 SSEPackedDouble>, TB, OpSize;
1493 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1494 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1495 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1496 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1497 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1498 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1499 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1500 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1502 //===----------------------------------------------------------------------===//
1503 // SSE 1 & 2 - Shuffle Instructions
1504 //===----------------------------------------------------------------------===//
1506 /// sse12_shuffle - sse 1 & 2 shuffle instructions
1507 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1508 ValueType vt, string asm, PatFrag mem_frag,
1509 Domain d, bit IsConvertibleToThreeAddress = 0> {
1510 def rmi : PIi8<0xC6, MRMSrcMem, (outs VR128:$dst),
1511 (ins VR128:$src1, f128mem:$src2, i8imm:$src3), asm,
1512 [(set VR128:$dst, (vt (shufp:$src3
1513 VR128:$src1, (mem_frag addr:$src2))))], d>;
1514 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1515 def rri : PIi8<0xC6, MRMSrcReg, (outs VR128:$dst),
1516 (ins VR128:$src1, VR128:$src2, i8imm:$src3), asm,
1518 (vt (shufp:$src3 VR128:$src1, VR128:$src2)))], d>;
1521 let isAsmParserOnly = 1 in {
1522 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1523 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1524 memopv4f32, SSEPackedSingle>, VEX_4V;
1525 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1526 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1527 memopv2f64, SSEPackedDouble>, OpSize, VEX_4V;
1530 let Constraints = "$src1 = $dst" in {
1531 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1532 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1533 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1535 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1536 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1537 memopv2f64, SSEPackedDouble>, TB, OpSize;
1540 //===----------------------------------------------------------------------===//
1541 // SSE 1 & 2 - Unpack Instructions
1542 //===----------------------------------------------------------------------===//
1544 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1545 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1546 PatFrag mem_frag, RegisterClass RC,
1547 X86MemOperand x86memop, string asm,
1549 def rr : PI<opc, MRMSrcReg,
1550 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1552 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1553 def rm : PI<opc, MRMSrcMem,
1554 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1556 (vt (OpNode RC:$src1,
1557 (mem_frag addr:$src2))))], d>;
1560 let AddedComplexity = 10 in {
1561 let isAsmParserOnly = 1 in {
1562 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1563 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1564 SSEPackedSingle>, VEX_4V;
1565 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1566 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1567 SSEPackedDouble>, OpSize, VEX_4V;
1568 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1569 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1570 SSEPackedSingle>, VEX_4V;
1571 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1572 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1573 SSEPackedDouble>, OpSize, VEX_4V;
1575 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1576 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1577 SSEPackedSingle>, VEX_4V;
1578 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1579 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1580 SSEPackedDouble>, OpSize, VEX_4V;
1581 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1582 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1583 SSEPackedSingle>, VEX_4V;
1584 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1585 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1586 SSEPackedDouble>, OpSize, VEX_4V;
1589 let Constraints = "$src1 = $dst" in {
1590 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1591 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1592 SSEPackedSingle>, TB;
1593 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1594 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1595 SSEPackedDouble>, TB, OpSize;
1596 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1597 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1598 SSEPackedSingle>, TB;
1599 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1600 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1601 SSEPackedDouble>, TB, OpSize;
1602 } // Constraints = "$src1 = $dst"
1603 } // AddedComplexity
1605 //===----------------------------------------------------------------------===//
1606 // SSE 1 & 2 - Extract Floating-Point Sign mask
1607 //===----------------------------------------------------------------------===//
1609 /// sse12_extr_sign_mask - sse 1 & 2 unpack and interleave
1610 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1612 def rr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1613 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1614 [(set GR32:$dst, (Int RC:$src))], d>;
1618 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1619 SSEPackedSingle>, TB;
1620 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1621 SSEPackedDouble>, TB, OpSize;
1623 let isAsmParserOnly = 1 in {
1624 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1625 "movmskps", SSEPackedSingle>, VEX;
1626 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1627 "movmskpd", SSEPackedDouble>, OpSize,
1629 // FIXME: merge with multiclass above when the intrinsics come.
1630 def VMOVMSKPSYrr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src),
1631 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1632 def VMOVMSKPDYrr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src),
1633 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1637 //===----------------------------------------------------------------------===//
1638 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1639 //===----------------------------------------------------------------------===//
1641 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1642 // names that start with 'Fs'.
1644 // Alias instructions that map fld0 to pxor for sse.
1645 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1646 canFoldAsLoad = 1 in {
1647 // FIXME: Set encoding to pseudo!
1648 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1649 [(set FR32:$dst, fp32imm0)]>,
1650 Requires<[HasSSE1]>, TB, OpSize;
1651 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1652 [(set FR64:$dst, fpimm0)]>,
1653 Requires<[HasSSE2]>, TB, OpSize;
1656 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
1657 // bits are disregarded.
1658 let neverHasSideEffects = 1 in {
1659 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1660 "movaps\t{$src, $dst|$dst, $src}", []>;
1661 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1662 "movapd\t{$src, $dst|$dst, $src}", []>;
1665 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1666 // bits are disregarded.
1667 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1668 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1669 "movaps\t{$src, $dst|$dst, $src}",
1670 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
1671 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1672 "movapd\t{$src, $dst|$dst, $src}",
1673 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1676 //===----------------------------------------------------------------------===//
1677 // SSE 1 & 2 - Logical Instructions
1678 //===----------------------------------------------------------------------===//
1680 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
1682 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
1684 let isAsmParserOnly = 1 in {
1685 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
1686 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, VEX_4V;
1688 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
1689 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, OpSize, VEX_4V;
1692 let Constraints = "$src1 = $dst" in {
1693 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
1694 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
1696 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
1697 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
1701 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1702 let mayLoad = 0 in {
1703 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
1704 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
1705 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
1708 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
1709 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
1711 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
1713 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
1714 SDNode OpNode, int HasPat = 0,
1715 list<list<dag>> Pattern = []> {
1716 let isAsmParserOnly = 1 in {
1717 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1718 !strconcat(OpcodeStr, "ps"), f128mem,
1719 !if(HasPat, Pattern[0], // rr
1720 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1722 !if(HasPat, Pattern[2], // rm
1723 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1724 (memopv2i64 addr:$src2)))]), 0>,
1727 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1728 !strconcat(OpcodeStr, "pd"), f128mem,
1729 !if(HasPat, Pattern[1], // rr
1730 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1733 !if(HasPat, Pattern[3], // rm
1734 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1735 (memopv2i64 addr:$src2)))]), 0>,
1738 let Constraints = "$src1 = $dst" in {
1739 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1740 !strconcat(OpcodeStr, "ps"), f128mem,
1741 !if(HasPat, Pattern[0], // rr
1742 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1744 !if(HasPat, Pattern[2], // rm
1745 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1746 (memopv2i64 addr:$src2)))])>, TB;
1748 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1749 !strconcat(OpcodeStr, "pd"), f128mem,
1750 !if(HasPat, Pattern[1], // rr
1751 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1754 !if(HasPat, Pattern[3], // rm
1755 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1756 (memopv2i64 addr:$src2)))])>,
1761 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
1762 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
1763 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
1764 let isCommutable = 0 in
1765 defm ANDN : sse12_fp_packed_logical<0x55, "andn", undef /* dummy */, 1, [
1767 [(set VR128:$dst, (v2i64 (and (xor VR128:$src1,
1768 (bc_v2i64 (v4i32 immAllOnesV))),
1771 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1772 (bc_v2i64 (v2f64 VR128:$src2))))],
1774 [(set VR128:$dst, (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
1775 (bc_v2i64 (v4i32 immAllOnesV))),
1776 (memopv2i64 addr:$src2))))],
1778 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1779 (memopv2i64 addr:$src2)))]]>;
1781 //===----------------------------------------------------------------------===//
1782 // SSE 1 & 2 - Arithmetic Instructions
1783 //===----------------------------------------------------------------------===//
1785 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
1788 /// In addition, we also have a special variant of the scalar form here to
1789 /// represent the associated intrinsic operation. This form is unlike the
1790 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1791 /// and leaves the top elements unmodified (therefore these cannot be commuted).
1793 /// These three forms can each be reg+reg or reg+mem.
1795 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
1797 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
1798 OpNode, FR32, f32mem, Is2Addr>, XS;
1799 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
1800 OpNode, FR64, f64mem, Is2Addr>, XD;
1803 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
1805 let mayLoad = 0 in {
1806 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
1807 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
1808 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
1809 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
1813 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
1815 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1816 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
1817 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1818 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
1821 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
1823 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1824 !strconcat(OpcodeStr, "ps"), "", "_ps", f128mem, memopv4f32,
1825 SSEPackedSingle, Is2Addr>, TB;
1827 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1828 !strconcat(OpcodeStr, "pd"), "2", "_pd", f128mem, memopv2f64,
1829 SSEPackedDouble, Is2Addr>, TB, OpSize;
1832 // Arithmetic instructions
1833 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
1834 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
1835 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>, VEX_4V;
1836 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
1837 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>, VEX_4V;
1839 let isCommutable = 0 in {
1840 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
1841 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>, VEX_4V;
1842 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
1843 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>, VEX_4V;
1844 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
1845 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>, VEX_4V;
1846 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
1847 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>, VEX_4V;
1851 let Constraints = "$src1 = $dst" in {
1852 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
1853 basic_sse12_fp_binop_p<0x58, "add", fadd>,
1854 basic_sse12_fp_binop_s_int<0x58, "add">;
1855 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
1856 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
1857 basic_sse12_fp_binop_s_int<0x59, "mul">;
1859 let isCommutable = 0 in {
1860 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
1861 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
1862 basic_sse12_fp_binop_s_int<0x5C, "sub">;
1863 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
1864 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
1865 basic_sse12_fp_binop_s_int<0x5E, "div">;
1866 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
1867 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
1868 basic_sse12_fp_binop_s_int<0x5F, "max">,
1869 basic_sse12_fp_binop_p_int<0x5F, "max">;
1870 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
1871 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
1872 basic_sse12_fp_binop_s_int<0x5D, "min">,
1873 basic_sse12_fp_binop_p_int<0x5D, "min">;
1878 /// In addition, we also have a special variant of the scalar form here to
1879 /// represent the associated intrinsic operation. This form is unlike the
1880 /// plain scalar form, in that it takes an entire vector (instead of a
1881 /// scalar) and leaves the top elements undefined.
1883 /// And, we have a special variant form for a full-vector intrinsic form.
1885 /// sse1_fp_unop_s - SSE1 unops in scalar form.
1886 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
1887 SDNode OpNode, Intrinsic F32Int> {
1888 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1889 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1890 [(set FR32:$dst, (OpNode FR32:$src))]>;
1891 // For scalar unary operations, fold a load into the operation
1892 // only in OptForSize mode. It eliminates an instruction, but it also
1893 // eliminates a whole-register clobber (the load), so it introduces a
1894 // partial register update condition.
1895 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
1896 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1897 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
1898 Requires<[HasSSE1, OptForSize]>;
1899 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1900 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1901 [(set VR128:$dst, (F32Int VR128:$src))]>;
1902 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1903 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1904 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1907 /// sse1_fp_unop_p - SSE1 unops in scalar form.
1908 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr,
1909 SDNode OpNode, Intrinsic V4F32Int> {
1910 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1911 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1912 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
1913 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1914 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1915 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
1916 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1917 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1918 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
1919 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1920 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1921 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
1924 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
1925 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1926 SDNode OpNode, Intrinsic F32Int> {
1927 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
1928 !strconcat(!strconcat("v", OpcodeStr),
1929 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1930 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
1931 !strconcat(!strconcat("v", OpcodeStr),
1932 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1933 []>, XS, Requires<[HasAVX, HasSSE1, OptForSize]>;
1934 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
1935 (ins VR128:$src1, VR128:$src2),
1936 !strconcat(!strconcat("v", OpcodeStr),
1937 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1938 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
1939 (ins VR128:$src1, ssmem:$src2),
1940 !strconcat(!strconcat("v", OpcodeStr),
1941 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1944 /// sse2_fp_unop_s - SSE2 unops in scalar form.
1945 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
1946 SDNode OpNode, Intrinsic F64Int> {
1947 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1948 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1949 [(set FR64:$dst, (OpNode FR64:$src))]>;
1950 // See the comments in sse1_fp_unop_s for why this is OptForSize.
1951 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1952 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1953 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
1954 Requires<[HasSSE2, OptForSize]>;
1955 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1956 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1957 [(set VR128:$dst, (F64Int VR128:$src))]>;
1958 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1959 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1960 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1963 /// sse2_fp_unop_p - SSE2 unops in vector forms.
1964 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
1965 SDNode OpNode, Intrinsic V2F64Int> {
1966 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1967 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1968 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
1969 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1970 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1971 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
1972 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1973 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1974 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
1975 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1976 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1977 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
1980 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
1981 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1982 SDNode OpNode, Intrinsic F64Int> {
1983 def SDr : VSDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1984 !strconcat(OpcodeStr,
1985 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1986 def SDm : VSDI<opc, MRMSrcMem, (outs FR64:$dst),
1987 (ins FR64:$src1, f64mem:$src2),
1988 !strconcat(OpcodeStr,
1989 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1990 def SDr_Int : VSDI<opc, MRMSrcReg, (outs VR128:$dst),
1991 (ins VR128:$src1, VR128:$src2),
1992 !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1994 def SDm_Int : VSDI<opc, MRMSrcMem, (outs VR128:$dst),
1995 (ins VR128:$src1, sdmem:$src2),
1996 !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2000 let isAsmParserOnly = 1 in {
2002 let Predicates = [HasAVX, HasSSE2] in {
2003 defm VSQRT : sse2_fp_unop_s_avx<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
2006 defm VSQRT : sse2_fp_unop_p<0x51, "vsqrt", fsqrt, int_x86_sse2_sqrt_pd>, VEX;
2009 let Predicates = [HasAVX, HasSSE1] in {
2010 defm VSQRT : sse1_fp_unop_s_avx<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
2012 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt, int_x86_sse_sqrt_ps>, VEX;
2013 // Reciprocal approximations. Note that these typically require refinement
2014 // in order to obtain suitable precision.
2015 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "rsqrt", X86frsqrt,
2016 int_x86_sse_rsqrt_ss>, VEX_4V;
2017 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt, int_x86_sse_rsqrt_ps>,
2019 defm VRCP : sse1_fp_unop_s_avx<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
2021 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp, int_x86_sse_rcp_ps>,
2027 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
2028 sse1_fp_unop_p<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ps>,
2029 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
2030 sse2_fp_unop_p<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_pd>;
2032 // Reciprocal approximations. Note that these typically require refinement
2033 // in order to obtain suitable precision.
2034 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
2035 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ps>;
2036 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
2037 sse1_fp_unop_p<0x53, "rcp", X86frcp, int_x86_sse_rcp_ps>;
2039 // There is no f64 version of the reciprocal approximation instructions.
2041 //===----------------------------------------------------------------------===//
2042 // SSE 1 & 2 - Non-temporal stores
2043 //===----------------------------------------------------------------------===//
2045 let isAsmParserOnly = 1 in {
2046 def VMOVNTPSmr_Int : VPSI<0x2B, MRMDestMem, (outs),
2047 (ins i128mem:$dst, VR128:$src),
2048 "movntps\t{$src, $dst|$dst, $src}",
2049 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>, VEX;
2050 def VMOVNTPDmr_Int : VPDI<0x2B, MRMDestMem, (outs),
2051 (ins i128mem:$dst, VR128:$src),
2052 "movntpd\t{$src, $dst|$dst, $src}",
2053 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>, VEX;
2055 let ExeDomain = SSEPackedInt in
2056 def VMOVNTDQmr_Int : VPDI<0xE7, MRMDestMem, (outs),
2057 (ins f128mem:$dst, VR128:$src),
2058 "movntdq\t{$src, $dst|$dst, $src}",
2059 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>, VEX;
2061 let AddedComplexity = 400 in { // Prefer non-temporal versions
2062 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
2063 (ins f128mem:$dst, VR128:$src),
2064 "movntps\t{$src, $dst|$dst, $src}",
2065 [(alignednontemporalstore (v4f32 VR128:$src),
2067 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
2068 (ins f128mem:$dst, VR128:$src),
2069 "movntpd\t{$src, $dst|$dst, $src}",
2070 [(alignednontemporalstore (v2f64 VR128:$src),
2072 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
2073 (ins f128mem:$dst, VR128:$src),
2074 "movntdq\t{$src, $dst|$dst, $src}",
2075 [(alignednontemporalstore (v2f64 VR128:$src),
2077 let ExeDomain = SSEPackedInt in
2078 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
2079 (ins f128mem:$dst, VR128:$src),
2080 "movntdq\t{$src, $dst|$dst, $src}",
2081 [(alignednontemporalstore (v4f32 VR128:$src),
2084 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
2085 (ins f256mem:$dst, VR256:$src),
2086 "movntps\t{$src, $dst|$dst, $src}",
2087 [(alignednontemporalstore (v8f32 VR256:$src),
2089 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
2090 (ins f256mem:$dst, VR256:$src),
2091 "movntpd\t{$src, $dst|$dst, $src}",
2092 [(alignednontemporalstore (v4f64 VR256:$src),
2094 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
2095 (ins f256mem:$dst, VR256:$src),
2096 "movntdq\t{$src, $dst|$dst, $src}",
2097 [(alignednontemporalstore (v4f64 VR256:$src),
2099 let ExeDomain = SSEPackedInt in
2100 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
2101 (ins f256mem:$dst, VR256:$src),
2102 "movntdq\t{$src, $dst|$dst, $src}",
2103 [(alignednontemporalstore (v8f32 VR256:$src),
2108 def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2109 "movntps\t{$src, $dst|$dst, $src}",
2110 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
2111 def MOVNTPDmr_Int : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2112 "movntpd\t{$src, $dst|$dst, $src}",
2113 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
2115 let ExeDomain = SSEPackedInt in
2116 def MOVNTDQmr_Int : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2117 "movntdq\t{$src, $dst|$dst, $src}",
2118 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
2120 let AddedComplexity = 400 in { // Prefer non-temporal versions
2121 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2122 "movntps\t{$src, $dst|$dst, $src}",
2123 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2124 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2125 "movntpd\t{$src, $dst|$dst, $src}",
2126 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
2128 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2129 "movntdq\t{$src, $dst|$dst, $src}",
2130 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2132 let ExeDomain = SSEPackedInt in
2133 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2134 "movntdq\t{$src, $dst|$dst, $src}",
2135 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2137 // There is no AVX form for instructions below this point
2138 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2139 "movnti\t{$src, $dst|$dst, $src}",
2140 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2141 TB, Requires<[HasSSE2]>;
2143 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2144 "movnti\t{$src, $dst|$dst, $src}",
2145 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2146 TB, Requires<[HasSSE2]>;
2149 def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2150 "movnti\t{$src, $dst|$dst, $src}",
2151 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
2152 TB, Requires<[HasSSE2]>;
2154 //===----------------------------------------------------------------------===//
2155 // SSE 1 & 2 - Misc Instructions (No AVX form)
2156 //===----------------------------------------------------------------------===//
2158 // Prefetch intrinsic.
2159 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2160 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
2161 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2162 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
2163 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2164 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
2165 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2166 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
2168 // Load, store, and memory fence
2169 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2170 TB, Requires<[HasSSE1]>;
2172 // Alias instructions that map zero vector to pxor / xorp* for sse.
2173 // We set canFoldAsLoad because this can be converted to a constant-pool
2174 // load of an all-zeros value if folding it would be beneficial.
2175 // FIXME: Change encoding to pseudo!
2176 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2177 isCodeGenOnly = 1 in {
2178 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2179 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
2180 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2181 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
2182 let ExeDomain = SSEPackedInt in
2183 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2184 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2187 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
2188 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
2189 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
2191 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2192 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
2194 //===----------------------------------------------------------------------===//
2195 // SSE 1 & 2 - Load/Store XCSR register
2196 //===----------------------------------------------------------------------===//
2198 let isAsmParserOnly = 1 in {
2199 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2200 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2201 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2202 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2205 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2206 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2207 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2208 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2210 //===---------------------------------------------------------------------===//
2211 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2212 //===---------------------------------------------------------------------===//
2213 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2215 let isAsmParserOnly = 1 in {
2216 let neverHasSideEffects = 1 in
2217 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2218 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2219 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2220 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2222 let canFoldAsLoad = 1, mayLoad = 1 in {
2223 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2224 "movdqa\t{$src, $dst|$dst, $src}",
2225 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>,
2227 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2228 "vmovdqu\t{$src, $dst|$dst, $src}",
2229 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2230 XS, VEX, Requires<[HasAVX, HasSSE2]>;
2233 let mayStore = 1 in {
2234 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2235 (ins i128mem:$dst, VR128:$src),
2236 "movdqa\t{$src, $dst|$dst, $src}",
2237 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>, VEX;
2238 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2239 "vmovdqu\t{$src, $dst|$dst, $src}",
2240 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2241 XS, VEX, Requires<[HasAVX, HasSSE2]>;
2245 let neverHasSideEffects = 1 in
2246 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2247 "movdqa\t{$src, $dst|$dst, $src}", []>;
2249 let canFoldAsLoad = 1, mayLoad = 1 in {
2250 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2251 "movdqa\t{$src, $dst|$dst, $src}",
2252 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2253 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2254 "movdqu\t{$src, $dst|$dst, $src}",
2255 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2256 XS, Requires<[HasSSE2]>;
2259 let mayStore = 1 in {
2260 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2261 "movdqa\t{$src, $dst|$dst, $src}",
2262 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2263 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2264 "movdqu\t{$src, $dst|$dst, $src}",
2265 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2266 XS, Requires<[HasSSE2]>;
2269 // Intrinsic forms of MOVDQU load and store
2270 let isAsmParserOnly = 1 in {
2271 let canFoldAsLoad = 1 in
2272 def VMOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2273 "vmovdqu\t{$src, $dst|$dst, $src}",
2274 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2275 XS, VEX, Requires<[HasAVX, HasSSE2]>;
2276 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2277 "vmovdqu\t{$src, $dst|$dst, $src}",
2278 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2279 XS, VEX, Requires<[HasAVX, HasSSE2]>;
2282 let canFoldAsLoad = 1 in
2283 def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2284 "movdqu\t{$src, $dst|$dst, $src}",
2285 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2286 XS, Requires<[HasSSE2]>;
2287 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2288 "movdqu\t{$src, $dst|$dst, $src}",
2289 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2290 XS, Requires<[HasSSE2]>;
2292 } // ExeDomain = SSEPackedInt
2294 //===---------------------------------------------------------------------===//
2295 // SSE2 - Packed Integer Arithmetic Instructions
2296 //===---------------------------------------------------------------------===//
2298 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2300 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
2301 bit IsCommutable = 0, bit Is2Addr = 1> {
2302 let isCommutable = IsCommutable in
2303 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2304 (ins VR128:$src1, VR128:$src2),
2306 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2307 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2308 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2309 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2310 (ins VR128:$src1, i128mem:$src2),
2312 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2313 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2314 [(set VR128:$dst, (IntId VR128:$src1,
2315 (bitconvert (memopv2i64 addr:$src2))))]>;
2318 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
2319 string OpcodeStr, Intrinsic IntId,
2320 Intrinsic IntId2, bit Is2Addr = 1> {
2321 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2322 (ins VR128:$src1, VR128:$src2),
2324 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2325 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2326 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2327 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2328 (ins VR128:$src1, i128mem:$src2),
2330 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2331 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2332 [(set VR128:$dst, (IntId VR128:$src1,
2333 (bitconvert (memopv2i64 addr:$src2))))]>;
2334 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
2335 (ins VR128:$src1, i32i8imm:$src2),
2337 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2338 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2339 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
2342 /// PDI_binop_rm - Simple SSE2 binary operator.
2343 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2344 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
2345 let isCommutable = IsCommutable in
2346 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2347 (ins VR128:$src1, VR128:$src2),
2349 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2350 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2351 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
2352 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2353 (ins VR128:$src1, i128mem:$src2),
2355 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2356 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2357 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
2358 (bitconvert (memopv2i64 addr:$src2)))))]>;
2361 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
2363 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
2364 /// to collapse (bitconvert VT to VT) into its operand.
2366 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
2367 bit IsCommutable = 0, bit Is2Addr = 1> {
2368 let isCommutable = IsCommutable in
2369 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2370 (ins VR128:$src1, VR128:$src2),
2372 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2373 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2374 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
2375 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2376 (ins VR128:$src1, i128mem:$src2),
2378 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2379 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2380 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
2383 } // ExeDomain = SSEPackedInt
2385 // 128-bit Integer Arithmetic
2387 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2388 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
2389 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
2390 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
2391 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
2392 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
2393 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
2394 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
2395 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
2396 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
2399 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
2401 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
2403 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
2405 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
2407 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
2409 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
2411 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
2413 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
2415 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
2417 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
2419 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
2421 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
2423 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
2425 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
2427 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
2429 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
2431 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
2433 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
2435 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
2439 let Constraints = "$src1 = $dst" in {
2440 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
2441 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
2442 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
2443 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
2444 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
2445 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
2446 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
2447 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
2448 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
2451 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
2452 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
2453 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
2454 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
2455 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
2456 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
2457 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
2458 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
2459 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
2460 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
2461 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
2462 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
2463 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
2464 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
2465 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
2466 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
2467 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
2468 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
2469 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
2471 } // Constraints = "$src1 = $dst"
2473 //===---------------------------------------------------------------------===//
2474 // SSE2 - Packed Integer Logical Instructions
2475 //===---------------------------------------------------------------------===//
2477 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2478 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
2479 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
2481 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
2482 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
2484 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
2485 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
2488 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
2489 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
2491 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
2492 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
2494 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
2495 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
2498 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
2499 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
2501 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
2502 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
2505 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
2506 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
2507 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
2509 let ExeDomain = SSEPackedInt in {
2510 let neverHasSideEffects = 1 in {
2511 // 128-bit logical shifts.
2512 def VPSLLDQri : PDIi8<0x73, MRM7r,
2513 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2514 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2516 def VPSRLDQri : PDIi8<0x73, MRM3r,
2517 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2518 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2520 // PSRADQri doesn't exist in SSE[1-3].
2522 def VPANDNrr : PDI<0xDF, MRMSrcReg,
2523 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2524 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2525 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2526 VR128:$src2)))]>, VEX_4V;
2528 def VPANDNrm : PDI<0xDF, MRMSrcMem,
2529 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2530 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2531 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2532 (memopv2i64 addr:$src2))))]>,
2537 let Constraints = "$src1 = $dst" in {
2538 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
2539 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
2540 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
2541 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
2542 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
2543 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
2545 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
2546 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
2547 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
2548 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
2549 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
2550 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
2552 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
2553 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
2554 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
2555 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
2557 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
2558 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
2559 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
2561 let ExeDomain = SSEPackedInt in {
2562 let neverHasSideEffects = 1 in {
2563 // 128-bit logical shifts.
2564 def PSLLDQri : PDIi8<0x73, MRM7r,
2565 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2566 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
2567 def PSRLDQri : PDIi8<0x73, MRM3r,
2568 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2569 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
2570 // PSRADQri doesn't exist in SSE[1-3].
2572 def PANDNrr : PDI<0xDF, MRMSrcReg,
2573 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2574 "pandn\t{$src2, $dst|$dst, $src2}",
2575 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2578 def PANDNrm : PDI<0xDF, MRMSrcMem,
2579 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2580 "pandn\t{$src2, $dst|$dst, $src2}",
2581 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2582 (memopv2i64 addr:$src2))))]>;
2584 } // Constraints = "$src1 = $dst"
2586 let Predicates = [HasSSE2] in {
2587 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2588 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2589 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2590 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2591 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2592 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
2593 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2594 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
2595 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2596 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2598 // Shift up / down and insert zero's.
2599 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2600 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2601 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2602 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2605 //===---------------------------------------------------------------------===//
2606 // SSE2 - Packed Integer Comparison Instructions
2607 //===---------------------------------------------------------------------===//
2609 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2610 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
2612 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
2614 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
2616 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
2618 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
2620 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
2624 let Constraints = "$src1 = $dst" in {
2625 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
2626 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
2627 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
2628 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
2629 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
2630 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
2631 } // Constraints = "$src1 = $dst"
2633 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2634 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
2635 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2636 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2637 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2638 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2639 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2640 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2641 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2642 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2643 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2644 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2646 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2647 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2648 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2649 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2650 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2651 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2652 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2653 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2654 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2655 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2656 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2657 (PCMPGTDrm VR128:$src1, addr:$src2)>;
2659 //===---------------------------------------------------------------------===//
2660 // SSE2 - Packed Integer Pack Instructions
2661 //===---------------------------------------------------------------------===//
2663 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2664 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
2666 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
2668 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
2672 let Constraints = "$src1 = $dst" in {
2673 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2674 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2675 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2676 } // Constraints = "$src1 = $dst"
2678 //===---------------------------------------------------------------------===//
2679 // SSE2 - Packed Integer Shuffle Instructions
2680 //===---------------------------------------------------------------------===//
2682 let ExeDomain = SSEPackedInt in {
2683 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
2685 def ri : Ii8<0x70, MRMSrcReg,
2686 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2687 !strconcat(OpcodeStr,
2688 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2689 [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
2691 def mi : Ii8<0x70, MRMSrcMem,
2692 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2693 !strconcat(OpcodeStr,
2694 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2695 [(set VR128:$dst, (vt (pshuf_frag:$src2
2696 (bc_frag (memopv2i64 addr:$src1)),
2699 } // ExeDomain = SSEPackedInt
2701 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2702 let AddedComplexity = 5 in
2703 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
2706 // SSE2 with ImmT == Imm8 and XS prefix.
2707 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
2710 // SSE2 with ImmT == Imm8 and XD prefix.
2711 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
2715 let Predicates = [HasSSE2] in {
2716 let AddedComplexity = 5 in
2717 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
2719 // SSE2 with ImmT == Imm8 and XS prefix.
2720 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
2722 // SSE2 with ImmT == Imm8 and XD prefix.
2723 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
2726 //===---------------------------------------------------------------------===//
2727 // SSE2 - Packed Integer Unpack Instructions
2728 //===---------------------------------------------------------------------===//
2730 let ExeDomain = SSEPackedInt in {
2731 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
2732 PatFrag unp_frag, PatFrag bc_frag, bit Is2Addr = 1> {
2733 def rr : PDI<opc, MRMSrcReg,
2734 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2736 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2737 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2738 [(set VR128:$dst, (vt (unp_frag VR128:$src1, VR128:$src2)))]>;
2739 def rm : PDI<opc, MRMSrcMem,
2740 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2742 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2743 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2744 [(set VR128:$dst, (unp_frag VR128:$src1,
2745 (bc_frag (memopv2i64
2749 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in {
2750 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, unpckl, bc_v16i8,
2752 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, unpckl, bc_v8i16,
2754 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, unpckl, bc_v4i32,
2757 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2758 /// knew to collapse (bitconvert VT to VT) into its operand.
2759 def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2760 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2761 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2763 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>, VEX_4V;
2764 def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2765 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2766 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2768 (v2i64 (unpckl VR128:$src1,
2769 (memopv2i64 addr:$src2))))]>, VEX_4V;
2771 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, unpckh, bc_v16i8,
2773 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, unpckh, bc_v8i16,
2775 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, unpckh, bc_v4i32,
2778 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2779 /// knew to collapse (bitconvert VT to VT) into its operand.
2780 def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2781 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2782 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2784 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>, VEX_4V;
2785 def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2786 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2787 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2789 (v2i64 (unpckh VR128:$src1,
2790 (memopv2i64 addr:$src2))))]>, VEX_4V;
2793 let Constraints = "$src1 = $dst" in {
2794 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, unpckl, bc_v16i8>;
2795 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, unpckl, bc_v8i16>;
2796 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, unpckl, bc_v4i32>;
2798 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2799 /// knew to collapse (bitconvert VT to VT) into its operand.
2800 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2801 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2802 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2804 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>;
2805 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2806 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2807 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2809 (v2i64 (unpckl VR128:$src1,
2810 (memopv2i64 addr:$src2))))]>;
2812 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, unpckh, bc_v16i8>;
2813 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, unpckh, bc_v8i16>;
2814 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, unpckh, bc_v4i32>;
2816 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2817 /// knew to collapse (bitconvert VT to VT) into its operand.
2818 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2819 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2820 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2822 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>;
2823 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2824 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2825 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2827 (v2i64 (unpckh VR128:$src1,
2828 (memopv2i64 addr:$src2))))]>;
2831 } // ExeDomain = SSEPackedInt
2833 //===---------------------------------------------------------------------===//
2834 // SSE2 - Packed Integer Extract and Insert
2835 //===---------------------------------------------------------------------===//
2837 let ExeDomain = SSEPackedInt in {
2838 multiclass sse2_pinsrw<bit Is2Addr = 1> {
2839 def rri : Ii8<0xC4, MRMSrcReg,
2840 (outs VR128:$dst), (ins VR128:$src1,
2841 GR32:$src2, i32i8imm:$src3),
2843 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2844 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2846 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
2847 def rmi : Ii8<0xC4, MRMSrcMem,
2848 (outs VR128:$dst), (ins VR128:$src1,
2849 i16mem:$src2, i32i8imm:$src3),
2851 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2852 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2854 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2859 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in
2860 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
2861 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2862 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2863 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2864 imm:$src2))]>, OpSize, VEX;
2865 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2866 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2867 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2868 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2872 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE2] in
2873 defm PINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
2875 let Constraints = "$src1 = $dst" in
2876 defm VPINSRW : sse2_pinsrw, TB, OpSize;
2878 } // ExeDomain = SSEPackedInt
2880 //===---------------------------------------------------------------------===//
2881 // SSE2 - Packed Mask Creation
2882 //===---------------------------------------------------------------------===//
2884 let ExeDomain = SSEPackedInt in {
2886 let isAsmParserOnly = 1 in
2887 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2888 "pmovmskb\t{$src, $dst|$dst, $src}",
2889 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
2890 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2891 "pmovmskb\t{$src, $dst|$dst, $src}",
2892 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2894 } // ExeDomain = SSEPackedInt
2896 //===---------------------------------------------------------------------===//
2897 // SSE2 - Conditional Store
2898 //===---------------------------------------------------------------------===//
2900 let ExeDomain = SSEPackedInt in {
2902 let isAsmParserOnly = 1 in {
2904 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
2905 (ins VR128:$src, VR128:$mask),
2906 "maskmovdqu\t{$mask, $src|$src, $mask}",
2907 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
2909 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
2910 (ins VR128:$src, VR128:$mask),
2911 "maskmovdqu\t{$mask, $src|$src, $mask}",
2912 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
2916 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2917 "maskmovdqu\t{$mask, $src|$src, $mask}",
2918 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
2920 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2921 "maskmovdqu\t{$mask, $src|$src, $mask}",
2922 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
2924 } // ExeDomain = SSEPackedInt
2926 //===---------------------------------------------------------------------===//
2927 // SSE2 - Move Doubleword
2928 //===---------------------------------------------------------------------===//
2930 // Move Int Doubleword to Packed Double Int
2931 let isAsmParserOnly = 1 in {
2932 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2933 "movd\t{$src, $dst|$dst, $src}",
2935 (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
2936 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2937 "movd\t{$src, $dst|$dst, $src}",
2939 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
2942 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2943 "movd\t{$src, $dst|$dst, $src}",
2945 (v4i32 (scalar_to_vector GR32:$src)))]>;
2946 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2947 "movd\t{$src, $dst|$dst, $src}",
2949 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2952 // Move Int Doubleword to Single Scalar
2953 let isAsmParserOnly = 1 in {
2954 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2955 "movd\t{$src, $dst|$dst, $src}",
2956 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
2958 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2959 "movd\t{$src, $dst|$dst, $src}",
2960 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
2963 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2964 "movd\t{$src, $dst|$dst, $src}",
2965 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2967 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2968 "movd\t{$src, $dst|$dst, $src}",
2969 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2971 // Move Packed Doubleword Int to Packed Double Int
2972 let isAsmParserOnly = 1 in {
2973 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2974 "movd\t{$src, $dst|$dst, $src}",
2975 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2977 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
2978 (ins i32mem:$dst, VR128:$src),
2979 "movd\t{$src, $dst|$dst, $src}",
2980 [(store (i32 (vector_extract (v4i32 VR128:$src),
2981 (iPTR 0))), addr:$dst)]>, VEX;
2983 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2984 "movd\t{$src, $dst|$dst, $src}",
2985 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2987 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
2988 "movd\t{$src, $dst|$dst, $src}",
2989 [(store (i32 (vector_extract (v4i32 VR128:$src),
2990 (iPTR 0))), addr:$dst)]>;
2992 // Move Scalar Single to Double Int
2993 let isAsmParserOnly = 1 in {
2994 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
2995 "movd\t{$src, $dst|$dst, $src}",
2996 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
2997 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
2998 "movd\t{$src, $dst|$dst, $src}",
2999 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
3001 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3002 "movd\t{$src, $dst|$dst, $src}",
3003 [(set GR32:$dst, (bitconvert FR32:$src))]>;
3004 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3005 "movd\t{$src, $dst|$dst, $src}",
3006 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
3008 // movd / movq to XMM register zero-extends
3009 let AddedComplexity = 15, isAsmParserOnly = 1 in {
3010 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3011 "movd\t{$src, $dst|$dst, $src}",
3012 [(set VR128:$dst, (v4i32 (X86vzmovl
3013 (v4i32 (scalar_to_vector GR32:$src)))))]>,
3015 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3016 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3017 [(set VR128:$dst, (v2i64 (X86vzmovl
3018 (v2i64 (scalar_to_vector GR64:$src)))))]>,
3021 let AddedComplexity = 15 in {
3022 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3023 "movd\t{$src, $dst|$dst, $src}",
3024 [(set VR128:$dst, (v4i32 (X86vzmovl
3025 (v4i32 (scalar_to_vector GR32:$src)))))]>;
3026 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3027 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3028 [(set VR128:$dst, (v2i64 (X86vzmovl
3029 (v2i64 (scalar_to_vector GR64:$src)))))]>;
3032 let AddedComplexity = 20 in {
3033 let isAsmParserOnly = 1 in
3034 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3035 "movd\t{$src, $dst|$dst, $src}",
3037 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3038 (loadi32 addr:$src))))))]>,
3040 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3041 "movd\t{$src, $dst|$dst, $src}",
3043 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3044 (loadi32 addr:$src))))))]>;
3046 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
3047 (MOVZDI2PDIrm addr:$src)>;
3048 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3049 (MOVZDI2PDIrm addr:$src)>;
3050 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3051 (MOVZDI2PDIrm addr:$src)>;
3054 //===---------------------------------------------------------------------===//
3055 // SSE2 - Move Quadword
3056 //===---------------------------------------------------------------------===//
3058 // Move Quadword Int to Packed Quadword Int
3059 let isAsmParserOnly = 1 in
3060 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3061 "vmovq\t{$src, $dst|$dst, $src}",
3063 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3064 VEX, Requires<[HasAVX, HasSSE2]>;
3065 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3066 "movq\t{$src, $dst|$dst, $src}",
3068 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3069 Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
3071 // Move Packed Quadword Int to Quadword Int
3072 let isAsmParserOnly = 1 in
3073 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3074 "movq\t{$src, $dst|$dst, $src}",
3075 [(store (i64 (vector_extract (v2i64 VR128:$src),
3076 (iPTR 0))), addr:$dst)]>, VEX;
3077 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3078 "movq\t{$src, $dst|$dst, $src}",
3079 [(store (i64 (vector_extract (v2i64 VR128:$src),
3080 (iPTR 0))), addr:$dst)]>;
3082 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
3083 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
3085 // Store / copy lower 64-bits of a XMM register.
3086 let isAsmParserOnly = 1 in
3087 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3088 "movq\t{$src, $dst|$dst, $src}",
3089 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
3090 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3091 "movq\t{$src, $dst|$dst, $src}",
3092 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
3094 let AddedComplexity = 20, isAsmParserOnly = 1 in
3095 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3096 "vmovq\t{$src, $dst|$dst, $src}",
3098 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3099 (loadi64 addr:$src))))))]>,
3100 XS, VEX, Requires<[HasAVX, HasSSE2]>;
3102 let AddedComplexity = 20 in {
3103 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3104 "movq\t{$src, $dst|$dst, $src}",
3106 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3107 (loadi64 addr:$src))))))]>,
3108 XS, Requires<[HasSSE2]>;
3110 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3111 (MOVZQI2PQIrm addr:$src)>;
3112 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
3113 (MOVZQI2PQIrm addr:$src)>;
3114 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
3117 // Moving from XMM to XMM and clear upper 64 bits. Note, there is a bug in
3118 // IA32 document. movq xmm1, xmm2 does clear the high bits.
3119 let isAsmParserOnly = 1, AddedComplexity = 15 in
3120 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3121 "vmovq\t{$src, $dst|$dst, $src}",
3122 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3123 XS, VEX, Requires<[HasAVX, HasSSE2]>;
3124 let AddedComplexity = 15 in
3125 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3126 "movq\t{$src, $dst|$dst, $src}",
3127 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3128 XS, Requires<[HasSSE2]>;
3130 let AddedComplexity = 20, isAsmParserOnly = 1 in
3131 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3132 "vmovq\t{$src, $dst|$dst, $src}",
3133 [(set VR128:$dst, (v2i64 (X86vzmovl
3134 (loadv2i64 addr:$src))))]>,
3135 XS, VEX, Requires<[HasAVX, HasSSE2]>;
3136 let AddedComplexity = 20 in {
3137 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3138 "movq\t{$src, $dst|$dst, $src}",
3139 [(set VR128:$dst, (v2i64 (X86vzmovl
3140 (loadv2i64 addr:$src))))]>,
3141 XS, Requires<[HasSSE2]>;
3143 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
3144 (MOVZPQILo2PQIrm addr:$src)>;
3147 // Instructions to match in the assembler
3148 let isAsmParserOnly = 1 in {
3149 // This instructions is in fact an alias to movd with 64 bit dst
3150 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3151 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3152 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3153 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3156 // Instructions for the disassembler
3157 // xr = XMM register
3160 let isAsmParserOnly = 1 in
3161 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3162 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3163 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3164 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3166 //===---------------------------------------------------------------------===//
3167 // SSE2 - Misc Instructions
3168 //===---------------------------------------------------------------------===//
3171 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3172 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3173 TB, Requires<[HasSSE2]>;
3175 // Load, store, and memory fence
3176 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3177 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3178 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3179 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3181 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3182 // was introduced with SSE2, it's backward compatible.
3183 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
3185 //TODO: custom lower this so as to never even generate the noop
3186 def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm),
3188 def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
3189 def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
3190 def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm),
3193 // Alias instructions that map zero vector to pxor / xorp* for sse.
3194 // We set canFoldAsLoad because this can be converted to a constant-pool
3195 // load of an all-ones value if folding it would be beneficial.
3196 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3197 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3198 // FIXME: Change encoding to pseudo.
3199 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3200 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
3202 //===---------------------------------------------------------------------===//
3203 // SSE3 - Conversion Instructions
3204 //===---------------------------------------------------------------------===//
3206 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in {
3207 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3208 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3209 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3210 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3211 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3212 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3215 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3216 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3217 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3218 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3219 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3220 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3221 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3222 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3224 //===---------------------------------------------------------------------===//
3225 // SSE3 - Move Instructions
3226 //===---------------------------------------------------------------------===//
3228 // Replicate Single FP
3229 multiclass sse3_replicate_sfp<bits<8> op, PatFrag rep_frag, string OpcodeStr> {
3230 def rr : S3SI<op, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3231 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3232 [(set VR128:$dst, (v4f32 (rep_frag
3233 VR128:$src, (undef))))]>;
3234 def rm : S3SI<op, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3235 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3236 [(set VR128:$dst, (rep_frag
3237 (memopv4f32 addr:$src), (undef)))]>;
3240 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in {
3241 defm VMOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "vmovshdup">, VEX;
3242 defm VMOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "vmovsldup">, VEX;
3244 defm MOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "movshdup">;
3245 defm MOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "movsldup">;
3247 // Replicate Double FP
3248 multiclass sse3_replicate_dfp<string OpcodeStr> {
3249 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3250 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3251 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
3252 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
3253 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3255 (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
3259 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in
3260 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
3261 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
3263 // Move Unaligned Integer
3264 let isAsmParserOnly = 1 in
3265 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3266 "vlddqu\t{$src, $dst|$dst, $src}",
3267 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
3268 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3269 "lddqu\t{$src, $dst|$dst, $src}",
3270 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
3272 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
3274 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3276 // Several Move patterns
3277 let AddedComplexity = 5 in {
3278 def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
3279 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3280 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3281 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3282 def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
3283 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3284 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
3285 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3288 // vector_shuffle v1, <undef> <1, 1, 3, 3>
3289 let AddedComplexity = 15 in
3290 def : Pat<(v4i32 (movshdup VR128:$src, (undef))),
3291 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3292 let AddedComplexity = 20 in
3293 def : Pat<(v4i32 (movshdup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3294 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
3296 // vector_shuffle v1, <undef> <0, 0, 2, 2>
3297 let AddedComplexity = 15 in
3298 def : Pat<(v4i32 (movsldup VR128:$src, (undef))),
3299 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3300 let AddedComplexity = 20 in
3301 def : Pat<(v4i32 (movsldup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3302 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
3304 //===---------------------------------------------------------------------===//
3305 // SSE3 - Arithmetic
3306 //===---------------------------------------------------------------------===//
3308 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, bit Is2Addr = 1> {
3309 def rr : I<0xD0, MRMSrcReg,
3310 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3312 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3313 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3314 [(set VR128:$dst, (Int VR128:$src1,
3316 def rm : I<0xD0, MRMSrcMem,
3317 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
3319 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3320 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3321 [(set VR128:$dst, (Int VR128:$src1,
3322 (memop addr:$src2)))]>;
3326 let isAsmParserOnly = 1, Predicates = [HasSSE3, HasAVX],
3327 ExeDomain = SSEPackedDouble in {
3328 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", 0>, XD,
3330 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", 0>, OpSize,
3333 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
3334 ExeDomain = SSEPackedDouble in {
3335 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps">, XD;
3336 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd">, TB, OpSize;
3339 //===---------------------------------------------------------------------===//
3340 // SSE3 Instructions
3341 //===---------------------------------------------------------------------===//
3344 class S3D_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1>
3345 : S3DI<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3347 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3348 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3349 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
3350 class S3D_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1>
3351 : S3DI<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
3353 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3354 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3355 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, (memop addr:$src2))))]>;
3356 class S3_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1>
3357 : S3I<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3359 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3360 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3361 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
3362 class S3_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1>
3363 : S3I<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
3365 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3366 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3367 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, (memopv2f64 addr:$src2))))]>;
3369 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in {
3370 def VHADDPSrr : S3D_Intrr<0x7C, "vhaddps", int_x86_sse3_hadd_ps, 0>, VEX_4V;
3371 def VHADDPSrm : S3D_Intrm<0x7C, "vhaddps", int_x86_sse3_hadd_ps, 0>, VEX_4V;
3372 def VHADDPDrr : S3_Intrr <0x7C, "vhaddpd", int_x86_sse3_hadd_pd, 0>, VEX_4V;
3373 def VHADDPDrm : S3_Intrm <0x7C, "vhaddpd", int_x86_sse3_hadd_pd, 0>, VEX_4V;
3374 def VHSUBPSrr : S3D_Intrr<0x7D, "vhsubps", int_x86_sse3_hsub_ps, 0>, VEX_4V;
3375 def VHSUBPSrm : S3D_Intrm<0x7D, "vhsubps", int_x86_sse3_hsub_ps, 0>, VEX_4V;
3376 def VHSUBPDrr : S3_Intrr <0x7D, "vhsubpd", int_x86_sse3_hsub_pd, 0>, VEX_4V;
3377 def VHSUBPDrm : S3_Intrm <0x7D, "vhsubpd", int_x86_sse3_hsub_pd, 0>, VEX_4V;
3380 let Constraints = "$src1 = $dst" in {
3381 def HADDPSrr : S3D_Intrr<0x7C, "haddps", int_x86_sse3_hadd_ps>;
3382 def HADDPSrm : S3D_Intrm<0x7C, "haddps", int_x86_sse3_hadd_ps>;
3383 def HADDPDrr : S3_Intrr <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
3384 def HADDPDrm : S3_Intrm <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
3385 def HSUBPSrr : S3D_Intrr<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
3386 def HSUBPSrm : S3D_Intrm<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
3387 def HSUBPDrr : S3_Intrr <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
3388 def HSUBPDrm : S3_Intrm <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
3391 //===---------------------------------------------------------------------===//
3392 // SSSE3 - Packed Absolute Instructions
3393 //===---------------------------------------------------------------------===//
3395 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
3396 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
3397 PatFrag mem_frag64, PatFrag mem_frag128,
3398 Intrinsic IntId64, Intrinsic IntId128> {
3399 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
3400 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3401 [(set VR64:$dst, (IntId64 VR64:$src))]>;
3403 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
3404 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3406 (IntId64 (bitconvert (mem_frag64 addr:$src))))]>;
3408 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3410 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3411 [(set VR128:$dst, (IntId128 VR128:$src))]>,
3414 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3416 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3419 (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
3422 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in {
3423 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv8i8, memopv16i8,
3424 int_x86_ssse3_pabs_b,
3425 int_x86_ssse3_pabs_b_128>, VEX;
3426 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv4i16, memopv8i16,
3427 int_x86_ssse3_pabs_w,
3428 int_x86_ssse3_pabs_w_128>, VEX;
3429 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv2i32, memopv4i32,
3430 int_x86_ssse3_pabs_d,
3431 int_x86_ssse3_pabs_d_128>, VEX;
3434 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv8i8, memopv16i8,
3435 int_x86_ssse3_pabs_b,
3436 int_x86_ssse3_pabs_b_128>;
3437 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv4i16, memopv8i16,
3438 int_x86_ssse3_pabs_w,
3439 int_x86_ssse3_pabs_w_128>;
3440 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv2i32, memopv4i32,
3441 int_x86_ssse3_pabs_d,
3442 int_x86_ssse3_pabs_d_128>;
3444 //===---------------------------------------------------------------------===//
3445 // SSSE3 - Packed Binary Operator Instructions
3446 //===---------------------------------------------------------------------===//
3448 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
3449 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
3450 PatFrag mem_frag64, PatFrag mem_frag128,
3451 Intrinsic IntId64, Intrinsic IntId128,
3453 let isCommutable = 1 in
3454 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
3455 (ins VR64:$src1, VR64:$src2),
3457 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3458 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3459 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]>;
3460 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
3461 (ins VR64:$src1, i64mem:$src2),
3463 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3464 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3466 (IntId64 VR64:$src1,
3467 (bitconvert (memopv8i8 addr:$src2))))]>;
3469 let isCommutable = 1 in
3470 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3471 (ins VR128:$src1, VR128:$src2),
3473 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3474 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3475 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3477 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3478 (ins VR128:$src1, i128mem:$src2),
3480 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3481 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3483 (IntId128 VR128:$src1,
3484 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
3487 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in {
3488 let isCommutable = 0 in {
3489 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv4i16, memopv8i16,
3490 int_x86_ssse3_phadd_w,
3491 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
3492 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv2i32, memopv4i32,
3493 int_x86_ssse3_phadd_d,
3494 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
3495 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv4i16, memopv8i16,
3496 int_x86_ssse3_phadd_sw,
3497 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
3498 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv4i16, memopv8i16,
3499 int_x86_ssse3_phsub_w,
3500 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
3501 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv2i32, memopv4i32,
3502 int_x86_ssse3_phsub_d,
3503 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
3504 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv4i16, memopv8i16,
3505 int_x86_ssse3_phsub_sw,
3506 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
3507 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv8i8, memopv16i8,
3508 int_x86_ssse3_pmadd_ub_sw,
3509 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
3510 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv8i8, memopv16i8,
3511 int_x86_ssse3_pshuf_b,
3512 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
3513 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv8i8, memopv16i8,
3514 int_x86_ssse3_psign_b,
3515 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
3516 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv4i16, memopv8i16,
3517 int_x86_ssse3_psign_w,
3518 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
3519 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv2i32, memopv4i32,
3520 int_x86_ssse3_psign_d,
3521 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
3523 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv4i16, memopv8i16,
3524 int_x86_ssse3_pmul_hr_sw,
3525 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
3528 // None of these have i8 immediate fields.
3529 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
3530 let isCommutable = 0 in {
3531 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv4i16, memopv8i16,
3532 int_x86_ssse3_phadd_w,
3533 int_x86_ssse3_phadd_w_128>;
3534 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv2i32, memopv4i32,
3535 int_x86_ssse3_phadd_d,
3536 int_x86_ssse3_phadd_d_128>;
3537 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv4i16, memopv8i16,
3538 int_x86_ssse3_phadd_sw,
3539 int_x86_ssse3_phadd_sw_128>;
3540 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv4i16, memopv8i16,
3541 int_x86_ssse3_phsub_w,
3542 int_x86_ssse3_phsub_w_128>;
3543 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv2i32, memopv4i32,
3544 int_x86_ssse3_phsub_d,
3545 int_x86_ssse3_phsub_d_128>;
3546 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv4i16, memopv8i16,
3547 int_x86_ssse3_phsub_sw,
3548 int_x86_ssse3_phsub_sw_128>;
3549 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv8i8, memopv16i8,
3550 int_x86_ssse3_pmadd_ub_sw,
3551 int_x86_ssse3_pmadd_ub_sw_128>;
3552 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv8i8, memopv16i8,
3553 int_x86_ssse3_pshuf_b,
3554 int_x86_ssse3_pshuf_b_128>;
3555 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv8i8, memopv16i8,
3556 int_x86_ssse3_psign_b,
3557 int_x86_ssse3_psign_b_128>;
3558 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv4i16, memopv8i16,
3559 int_x86_ssse3_psign_w,
3560 int_x86_ssse3_psign_w_128>;
3561 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv2i32, memopv4i32,
3562 int_x86_ssse3_psign_d,
3563 int_x86_ssse3_psign_d_128>;
3565 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv4i16, memopv8i16,
3566 int_x86_ssse3_pmul_hr_sw,
3567 int_x86_ssse3_pmul_hr_sw_128>;
3570 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
3571 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
3572 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
3573 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
3575 //===---------------------------------------------------------------------===//
3576 // SSSE3 - Packed Align Instruction Patterns
3577 //===---------------------------------------------------------------------===//
3579 multiclass sse3_palign<string asm, bit Is2Addr = 1> {
3580 def R64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
3581 (ins VR64:$src1, VR64:$src2, i8imm:$src3),
3583 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3585 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3587 def R64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
3588 (ins VR64:$src1, i64mem:$src2, i8imm:$src3),
3590 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3592 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3595 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
3596 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
3598 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3600 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3602 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
3603 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
3605 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3607 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3611 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE3] in
3612 defm VPALIGN : sse3_palign<"vpalignr", 0>, VEX_4V;
3613 let Constraints = "$src1 = $dst" in
3614 defm PALIGN : sse3_palign<"palignr">;
3616 let AddedComplexity = 5 in {
3618 def : Pat<(v1i64 (palign:$src3 VR64:$src1, VR64:$src2)),
3619 (PALIGNR64rr VR64:$src2, VR64:$src1,
3620 (SHUFFLE_get_palign_imm VR64:$src3))>,
3621 Requires<[HasSSSE3]>;
3622 def : Pat<(v2i32 (palign:$src3 VR64:$src1, VR64:$src2)),
3623 (PALIGNR64rr VR64:$src2, VR64:$src1,
3624 (SHUFFLE_get_palign_imm VR64:$src3))>,
3625 Requires<[HasSSSE3]>;
3626 def : Pat<(v4i16 (palign:$src3 VR64:$src1, VR64:$src2)),
3627 (PALIGNR64rr VR64:$src2, VR64:$src1,
3628 (SHUFFLE_get_palign_imm VR64:$src3))>,
3629 Requires<[HasSSSE3]>;
3630 def : Pat<(v8i8 (palign:$src3 VR64:$src1, VR64:$src2)),
3631 (PALIGNR64rr VR64:$src2, VR64:$src1,
3632 (SHUFFLE_get_palign_imm VR64:$src3))>,
3633 Requires<[HasSSSE3]>;
3635 def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
3636 (PALIGNR128rr VR128:$src2, VR128:$src1,
3637 (SHUFFLE_get_palign_imm VR128:$src3))>,
3638 Requires<[HasSSSE3]>;
3639 def : Pat<(v4f32 (palign:$src3 VR128:$src1, VR128:$src2)),
3640 (PALIGNR128rr VR128:$src2, VR128:$src1,
3641 (SHUFFLE_get_palign_imm VR128:$src3))>,
3642 Requires<[HasSSSE3]>;
3643 def : Pat<(v8i16 (palign:$src3 VR128:$src1, VR128:$src2)),
3644 (PALIGNR128rr VR128:$src2, VR128:$src1,
3645 (SHUFFLE_get_palign_imm VR128:$src3))>,
3646 Requires<[HasSSSE3]>;
3647 def : Pat<(v16i8 (palign:$src3 VR128:$src1, VR128:$src2)),
3648 (PALIGNR128rr VR128:$src2, VR128:$src1,
3649 (SHUFFLE_get_palign_imm VR128:$src3))>,
3650 Requires<[HasSSSE3]>;
3653 //===---------------------------------------------------------------------===//
3654 // SSSE3 Misc Instructions
3655 //===---------------------------------------------------------------------===//
3657 // Thread synchronization
3658 def MONITOR : I<0x01, MRM_C8, (outs), (ins), "monitor",
3659 [(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
3660 def MWAIT : I<0x01, MRM_C9, (outs), (ins), "mwait",
3661 [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
3663 //===---------------------------------------------------------------------===//
3664 // Non-Instruction Patterns
3665 //===---------------------------------------------------------------------===//
3667 // extload f32 -> f64. This matches load+fextend because we have a hack in
3668 // the isel (PreprocessForFPConvert) that can introduce loads after dag
3670 // Since these loads aren't folded into the fextend, we have to match it
3672 let Predicates = [HasSSE2] in
3673 def : Pat<(fextend (loadf32 addr:$src)),
3674 (CVTSS2SDrm addr:$src)>;
3677 let Predicates = [HasSSE2] in {
3678 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
3679 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
3680 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
3681 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
3682 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
3683 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
3684 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
3685 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
3686 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
3687 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
3688 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
3689 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
3690 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
3691 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
3692 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
3693 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
3694 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
3695 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
3696 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
3697 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
3698 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
3699 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
3700 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
3701 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
3702 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
3703 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
3704 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
3705 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
3706 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
3707 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
3710 // Move scalar to XMM zero-extended
3711 // movd to XMM register zero-extends
3712 let AddedComplexity = 15 in {
3713 // Zeroing a VR128 then do a MOVS{S|D} to the lower bits.
3714 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
3715 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
3716 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
3717 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
3718 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
3719 (MOVSSrr (v4f32 (V_SET0PS)),
3720 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
3721 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
3722 (MOVSSrr (v4i32 (V_SET0PI)),
3723 (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
3726 // Splat v2f64 / v2i64
3727 let AddedComplexity = 10 in {
3728 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
3729 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3730 def : Pat<(unpckh (v2f64 VR128:$src), (undef)),
3731 (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3732 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
3733 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3734 def : Pat<(unpckh (v2i64 VR128:$src), (undef)),
3735 (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3738 // Special unary SHUFPSrri case.
3739 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
3740 (SHUFPSrri VR128:$src1, VR128:$src1,
3741 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3742 let AddedComplexity = 5 in
3743 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3744 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3745 Requires<[HasSSE2]>;
3746 // Special unary SHUFPDrri case.
3747 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
3748 (SHUFPDrri VR128:$src1, VR128:$src1,
3749 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3750 Requires<[HasSSE2]>;
3751 // Special unary SHUFPDrri case.
3752 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
3753 (SHUFPDrri VR128:$src1, VR128:$src1,
3754 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3755 Requires<[HasSSE2]>;
3756 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3757 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3758 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3759 Requires<[HasSSE2]>;
3761 // Special binary v4i32 shuffle cases with SHUFPS.
3762 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
3763 (SHUFPSrri VR128:$src1, VR128:$src2,
3764 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3765 Requires<[HasSSE2]>;
3766 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)))),
3767 (SHUFPSrmi VR128:$src1, addr:$src2,
3768 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3769 Requires<[HasSSE2]>;
3770 // Special binary v2i64 shuffle cases using SHUFPDrri.
3771 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
3772 (SHUFPDrri VR128:$src1, VR128:$src2,
3773 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3774 Requires<[HasSSE2]>;
3776 // vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
3777 let AddedComplexity = 15 in {
3778 def : Pat<(v4i32 (unpckl_undef:$src2 VR128:$src, (undef))),
3779 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3780 Requires<[OptForSpeed, HasSSE2]>;
3781 def : Pat<(v4f32 (unpckl_undef:$src2 VR128:$src, (undef))),
3782 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3783 Requires<[OptForSpeed, HasSSE2]>;
3785 let AddedComplexity = 10 in {
3786 def : Pat<(v4f32 (unpckl_undef VR128:$src, (undef))),
3787 (UNPCKLPSrr VR128:$src, VR128:$src)>;
3788 def : Pat<(v16i8 (unpckl_undef VR128:$src, (undef))),
3789 (PUNPCKLBWrr VR128:$src, VR128:$src)>;
3790 def : Pat<(v8i16 (unpckl_undef VR128:$src, (undef))),
3791 (PUNPCKLWDrr VR128:$src, VR128:$src)>;
3792 def : Pat<(v4i32 (unpckl_undef VR128:$src, (undef))),
3793 (PUNPCKLDQrr VR128:$src, VR128:$src)>;
3796 // vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
3797 let AddedComplexity = 15 in {
3798 def : Pat<(v4i32 (unpckh_undef:$src2 VR128:$src, (undef))),
3799 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3800 Requires<[OptForSpeed, HasSSE2]>;
3801 def : Pat<(v4f32 (unpckh_undef:$src2 VR128:$src, (undef))),
3802 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3803 Requires<[OptForSpeed, HasSSE2]>;
3805 let AddedComplexity = 10 in {
3806 def : Pat<(v4f32 (unpckh_undef VR128:$src, (undef))),
3807 (UNPCKHPSrr VR128:$src, VR128:$src)>;
3808 def : Pat<(v16i8 (unpckh_undef VR128:$src, (undef))),
3809 (PUNPCKHBWrr VR128:$src, VR128:$src)>;
3810 def : Pat<(v8i16 (unpckh_undef VR128:$src, (undef))),
3811 (PUNPCKHWDrr VR128:$src, VR128:$src)>;
3812 def : Pat<(v4i32 (unpckh_undef VR128:$src, (undef))),
3813 (PUNPCKHDQrr VR128:$src, VR128:$src)>;
3816 let AddedComplexity = 20 in {
3817 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
3818 def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
3819 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
3821 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
3822 def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
3823 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
3825 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
3826 def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
3827 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3828 def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
3829 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3832 let AddedComplexity = 20 in {
3833 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
3834 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
3835 (MOVLPSrm VR128:$src1, addr:$src2)>;
3836 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
3837 (MOVLPDrm VR128:$src1, addr:$src2)>;
3838 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
3839 (MOVLPSrm VR128:$src1, addr:$src2)>;
3840 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
3841 (MOVLPDrm VR128:$src1, addr:$src2)>;
3844 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
3845 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3846 (MOVLPSmr addr:$src1, VR128:$src2)>;
3847 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3848 (MOVLPDmr addr:$src1, VR128:$src2)>;
3849 def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
3851 (MOVLPSmr addr:$src1, VR128:$src2)>;
3852 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3853 (MOVLPDmr addr:$src1, VR128:$src2)>;
3855 let AddedComplexity = 15 in {
3856 // Setting the lowest element in the vector.
3857 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
3858 (MOVSSrr (v4i32 VR128:$src1),
3859 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
3860 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
3861 (MOVSDrr (v2i64 VR128:$src1),
3862 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
3864 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
3865 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
3866 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3867 Requires<[HasSSE2]>;
3868 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
3869 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3870 Requires<[HasSSE2]>;
3873 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
3874 // fall back to this for SSE1)
3875 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
3876 (SHUFPSrri VR128:$src2, VR128:$src1,
3877 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3879 // Set lowest element and zero upper elements.
3880 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
3881 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
3883 // Some special case pandn patterns.
3884 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3886 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3887 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3889 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3890 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3892 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3894 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3895 (memop addr:$src2))),
3896 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3897 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3898 (memop addr:$src2))),
3899 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3900 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3901 (memop addr:$src2))),
3902 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3904 // vector -> vector casts
3905 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
3906 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
3907 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
3908 (Int_CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
3909 def : Pat<(v2f64 (sint_to_fp (v2i32 VR64:$src))),
3910 (Int_CVTPI2PDrr VR64:$src)>, Requires<[HasSSE2]>;
3911 def : Pat<(v2i32 (fp_to_sint (v2f64 VR128:$src))),
3912 (Int_CVTTPD2PIrr VR128:$src)>, Requires<[HasSSE2]>;
3914 // Use movaps / movups for SSE integer load / store (one byte shorter).
3915 def : Pat<(alignedloadv4i32 addr:$src),
3916 (MOVAPSrm addr:$src)>;
3917 def : Pat<(loadv4i32 addr:$src),
3918 (MOVUPSrm addr:$src)>;
3919 def : Pat<(alignedloadv2i64 addr:$src),
3920 (MOVAPSrm addr:$src)>;
3921 def : Pat<(loadv2i64 addr:$src),
3922 (MOVUPSrm addr:$src)>;
3924 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3925 (MOVAPSmr addr:$dst, VR128:$src)>;
3926 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3927 (MOVAPSmr addr:$dst, VR128:$src)>;
3928 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3929 (MOVAPSmr addr:$dst, VR128:$src)>;
3930 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3931 (MOVAPSmr addr:$dst, VR128:$src)>;
3932 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3933 (MOVUPSmr addr:$dst, VR128:$src)>;
3934 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3935 (MOVUPSmr addr:$dst, VR128:$src)>;
3936 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3937 (MOVUPSmr addr:$dst, VR128:$src)>;
3938 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3939 (MOVUPSmr addr:$dst, VR128:$src)>;
3941 //===----------------------------------------------------------------------===//
3942 // SSE4.1 - Packed Move with Sign/Zero Extend
3943 //===----------------------------------------------------------------------===//
3945 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3946 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3947 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3948 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3950 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3951 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3953 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
3957 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
3958 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
3960 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
3962 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
3964 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
3966 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
3968 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
3972 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
3973 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
3974 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
3975 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
3976 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
3977 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
3979 // Common patterns involving scalar load.
3980 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
3981 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
3982 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
3983 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
3985 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
3986 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
3987 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
3988 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
3990 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
3991 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
3992 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
3993 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
3995 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
3996 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
3997 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
3998 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4000 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
4001 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4002 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
4003 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4005 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
4006 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4007 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
4008 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4011 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4012 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4013 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4014 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4016 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4017 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4019 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
4023 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4024 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
4026 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
4028 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
4030 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
4034 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
4035 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
4036 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
4037 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
4039 // Common patterns involving scalar load
4040 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
4041 (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
4042 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
4043 (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
4045 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
4046 (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
4047 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
4048 (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
4051 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4052 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4053 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4054 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4056 // Expecting a i16 load any extended to i32 value.
4057 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
4058 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4059 [(set VR128:$dst, (IntId (bitconvert
4060 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
4064 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4065 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
4067 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
4070 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
4071 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
4073 // Common patterns involving scalar load
4074 def : Pat<(int_x86_sse41_pmovsxbq
4075 (bitconvert (v4i32 (X86vzmovl
4076 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4077 (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
4079 def : Pat<(int_x86_sse41_pmovzxbq
4080 (bitconvert (v4i32 (X86vzmovl
4081 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4082 (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
4084 //===----------------------------------------------------------------------===//
4085 // SSE4.1 - Extract Instructions
4086 //===----------------------------------------------------------------------===//
4088 /// SS41I_binop_ext8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
4089 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
4090 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4091 (ins VR128:$src1, i32i8imm:$src2),
4092 !strconcat(OpcodeStr,
4093 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4094 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
4096 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4097 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
4098 !strconcat(OpcodeStr,
4099 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4102 // There's an AssertZext in the way of writing the store pattern
4103 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4106 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4107 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
4109 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
4112 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
4113 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
4114 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4115 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
4116 !strconcat(OpcodeStr,
4117 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4120 // There's an AssertZext in the way of writing the store pattern
4121 // (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4124 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4125 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
4127 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
4130 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4131 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
4132 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4133 (ins VR128:$src1, i32i8imm:$src2),
4134 !strconcat(OpcodeStr,
4135 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4137 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
4138 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4139 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
4140 !strconcat(OpcodeStr,
4141 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4142 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
4143 addr:$dst)]>, OpSize;
4146 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4147 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
4149 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
4151 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4152 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
4153 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
4154 (ins VR128:$src1, i32i8imm:$src2),
4155 !strconcat(OpcodeStr,
4156 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4158 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
4159 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4160 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
4161 !strconcat(OpcodeStr,
4162 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4163 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
4164 addr:$dst)]>, OpSize, REX_W;
4167 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4168 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
4170 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
4172 /// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
4174 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
4175 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4176 (ins VR128:$src1, i32i8imm:$src2),
4177 !strconcat(OpcodeStr,
4178 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4180 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
4182 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4183 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
4184 !strconcat(OpcodeStr,
4185 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4186 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
4187 addr:$dst)]>, OpSize;
4190 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4191 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
4192 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
4194 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
4195 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
4198 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
4199 Requires<[HasSSE41]>;
4201 //===----------------------------------------------------------------------===//
4202 // SSE4.1 - Insert Instructions
4203 //===----------------------------------------------------------------------===//
4205 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
4206 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4207 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4209 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4211 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4213 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
4214 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4215 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
4217 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4219 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4221 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
4222 imm:$src3))]>, OpSize;
4225 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4226 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
4227 let Constraints = "$src1 = $dst" in
4228 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
4230 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
4231 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4232 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4234 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4236 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4238 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
4240 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4241 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
4243 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4245 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4247 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
4248 imm:$src3)))]>, OpSize;
4251 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4252 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
4253 let Constraints = "$src1 = $dst" in
4254 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
4256 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
4257 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4258 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4260 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4262 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4264 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
4266 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4267 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
4269 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4271 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4273 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
4274 imm:$src3)))]>, OpSize;
4277 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4278 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
4279 let Constraints = "$src1 = $dst" in
4280 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
4282 // insertps has a few different modes, there's the first two here below which
4283 // are optimized inserts that won't zero arbitrary elements in the destination
4284 // vector. The next one matches the intrinsic and could zero arbitrary elements
4285 // in the target vector.
4286 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
4287 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4288 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4290 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4292 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4294 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
4296 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4297 (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
4299 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4301 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4303 (X86insrtps VR128:$src1,
4304 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
4305 imm:$src3))]>, OpSize;
4308 let Constraints = "$src1 = $dst" in
4309 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
4310 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4311 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
4313 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4314 (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>;
4316 //===----------------------------------------------------------------------===//
4317 // SSE4.1 - Round Instructions
4318 //===----------------------------------------------------------------------===//
4320 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd,
4323 Intrinsic V2F64Int> {
4324 // Intrinsic operation, reg.
4325 // Vector intrinsic operation, reg
4326 def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
4327 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4328 !strconcat(OpcodeStr,
4329 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4330 [(set VR128:$dst, (V4F32Int VR128:$src1, imm:$src2))]>,
4333 // Vector intrinsic operation, mem
4334 def PSm_Int : Ii8<opcps, MRMSrcMem,
4335 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
4336 !strconcat(OpcodeStr,
4337 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4339 (V4F32Int (memopv4f32 addr:$src1),imm:$src2))]>,
4341 Requires<[HasSSE41]>;
4343 // Vector intrinsic operation, reg
4344 def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
4345 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4346 !strconcat(OpcodeStr,
4347 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4348 [(set VR128:$dst, (V2F64Int VR128:$src1, imm:$src2))]>,
4351 // Vector intrinsic operation, mem
4352 def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
4353 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
4354 !strconcat(OpcodeStr,
4355 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4357 (V2F64Int (memopv2f64 addr:$src1),imm:$src2))]>,
4361 multiclass sse41_fp_unop_rm_avx<bits<8> opcps, bits<8> opcpd,
4363 // Intrinsic operation, reg.
4364 // Vector intrinsic operation, reg
4365 def PSr : SS4AIi8<opcps, MRMSrcReg,
4366 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4367 !strconcat(OpcodeStr,
4368 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4371 // Vector intrinsic operation, mem
4372 def PSm : Ii8<opcps, MRMSrcMem,
4373 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
4374 !strconcat(OpcodeStr,
4375 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4376 []>, TA, OpSize, Requires<[HasSSE41]>;
4378 // Vector intrinsic operation, reg
4379 def PDr : SS4AIi8<opcpd, MRMSrcReg,
4380 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4381 !strconcat(OpcodeStr,
4382 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4385 // Vector intrinsic operation, mem
4386 def PDm : SS4AIi8<opcpd, MRMSrcMem,
4387 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
4388 !strconcat(OpcodeStr,
4389 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4393 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4396 Intrinsic F64Int, bit Is2Addr = 1> {
4397 // Intrinsic operation, reg.
4398 def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
4399 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4401 !strconcat(OpcodeStr,
4402 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4403 !strconcat(OpcodeStr,
4404 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4405 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4408 // Intrinsic operation, mem.
4409 def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
4410 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4412 !strconcat(OpcodeStr,
4413 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4414 !strconcat(OpcodeStr,
4415 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4417 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
4420 // Intrinsic operation, reg.
4421 def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
4422 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4424 !strconcat(OpcodeStr,
4425 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4426 !strconcat(OpcodeStr,
4427 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4428 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4431 // Intrinsic operation, mem.
4432 def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
4433 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4435 !strconcat(OpcodeStr,
4436 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4437 !strconcat(OpcodeStr,
4438 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4440 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
4444 multiclass sse41_fp_binop_rm_avx<bits<8> opcss, bits<8> opcsd,
4446 // Intrinsic operation, reg.
4447 def SSr : SS4AIi8<opcss, MRMSrcReg,
4448 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4449 !strconcat(OpcodeStr,
4450 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4453 // Intrinsic operation, mem.
4454 def SSm : SS4AIi8<opcss, MRMSrcMem,
4455 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4456 !strconcat(OpcodeStr,
4457 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4460 // Intrinsic operation, reg.
4461 def SDr : SS4AIi8<opcsd, MRMSrcReg,
4462 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4463 !strconcat(OpcodeStr,
4464 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4467 // Intrinsic operation, mem.
4468 def SDm : SS4AIi8<opcsd, MRMSrcMem,
4469 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4470 !strconcat(OpcodeStr,
4471 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4475 // FP round - roundss, roundps, roundsd, roundpd
4476 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4478 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround",
4479 int_x86_sse41_round_ps, int_x86_sse41_round_pd>,
4481 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
4482 int_x86_sse41_round_ss, int_x86_sse41_round_sd,
4484 // Instructions for the assembler
4485 defm VROUND : sse41_fp_unop_rm_avx<0x08, 0x09, "vround">, VEX;
4486 defm VROUND : sse41_fp_binop_rm_avx<0x0A, 0x0B, "vround">, VEX_4V;
4489 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round",
4490 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
4491 let Constraints = "$src1 = $dst" in
4492 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
4493 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
4495 //===----------------------------------------------------------------------===//
4496 // SSE4.1 - Misc Instructions
4497 //===----------------------------------------------------------------------===//
4499 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
4500 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
4501 Intrinsic IntId128> {
4502 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4504 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4505 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
4506 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4508 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4511 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
4514 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4515 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
4516 int_x86_sse41_phminposuw>, VEX;
4517 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
4518 int_x86_sse41_phminposuw>;
4520 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
4521 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
4522 Intrinsic IntId128, bit Is2Addr = 1> {
4523 let isCommutable = 1 in
4524 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4525 (ins VR128:$src1, VR128:$src2),
4527 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4528 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4529 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
4530 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4531 (ins VR128:$src1, i128mem:$src2),
4533 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4534 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4536 (IntId128 VR128:$src1,
4537 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4540 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4541 let isCommutable = 0 in
4542 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
4544 defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
4546 defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
4548 defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
4550 defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
4552 defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
4554 defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
4556 defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
4558 defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
4560 defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
4562 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
4566 let Constraints = "$src1 = $dst" in {
4567 let isCommutable = 0 in
4568 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
4569 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
4570 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
4571 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
4572 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
4573 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
4574 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
4575 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
4576 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
4577 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
4578 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
4581 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
4582 (PCMPEQQrr VR128:$src1, VR128:$src2)>;
4583 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
4584 (PCMPEQQrm VR128:$src1, addr:$src2)>;
4586 /// SS48I_binop_rm - Simple SSE41 binary operator.
4587 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4588 ValueType OpVT, bit Is2Addr = 1> {
4589 let isCommutable = 1 in
4590 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4591 (ins VR128:$src1, VR128:$src2),
4593 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4594 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4595 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
4597 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4598 (ins VR128:$src1, i128mem:$src2),
4600 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4601 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4602 [(set VR128:$dst, (OpNode VR128:$src1,
4603 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
4607 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4608 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
4609 let Constraints = "$src1 = $dst" in
4610 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
4612 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
4613 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
4614 Intrinsic IntId128, bit Is2Addr = 1> {
4615 let isCommutable = 1 in
4616 def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4617 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4619 !strconcat(OpcodeStr,
4620 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4621 !strconcat(OpcodeStr,
4622 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4624 (IntId128 VR128:$src1, VR128:$src2, imm:$src3))]>,
4626 def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4627 (ins VR128:$src1, i128mem:$src2, i32i8imm:$src3),
4629 !strconcat(OpcodeStr,
4630 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4631 !strconcat(OpcodeStr,
4632 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4634 (IntId128 VR128:$src1,
4635 (bitconvert (memopv16i8 addr:$src2)), imm:$src3))]>,
4639 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4640 let isCommutable = 0 in {
4641 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
4643 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
4645 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
4647 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
4650 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
4652 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
4656 let Constraints = "$src1 = $dst" in {
4657 let isCommutable = 0 in {
4658 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps>;
4659 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd>;
4660 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw>;
4661 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw>;
4663 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps>;
4664 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd>;
4667 /// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operators
4668 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4669 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr> {
4670 def rr : I<opc, MRMSrcReg, (outs VR128:$dst),
4671 (ins VR128:$src1, VR128:$src2, VR128:$src3),
4672 !strconcat(OpcodeStr,
4673 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4674 [], SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4676 def rm : I<opc, MRMSrcMem, (outs VR128:$dst),
4677 (ins VR128:$src1, i128mem:$src2, VR128:$src3),
4678 !strconcat(OpcodeStr,
4679 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4680 [], SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4684 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd">;
4685 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps">;
4686 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb">;
4688 /// SS41I_ternary_int - SSE 4.1 ternary operator
4689 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
4690 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4691 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4692 (ins VR128:$src1, VR128:$src2),
4693 !strconcat(OpcodeStr,
4694 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4695 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
4698 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4699 (ins VR128:$src1, i128mem:$src2),
4700 !strconcat(OpcodeStr,
4701 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4704 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
4708 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
4709 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
4710 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
4712 // ptest instruction we'll lower to this in X86ISelLowering primarily from
4713 // the intel intrinsic that corresponds to this.
4714 let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in {
4715 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4716 "vptest\t{$src2, $src1|$src1, $src2}",
4717 [(set EFLAGS, (X86ptest VR128:$src1, VR128:$src2))]>,
4719 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
4720 "vptest\t{$src2, $src1|$src1, $src2}",
4721 [(set EFLAGS, (X86ptest VR128:$src1, (load addr:$src2)))]>,
4725 let Defs = [EFLAGS] in {
4726 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4727 "ptest \t{$src2, $src1|$src1, $src2}",
4728 [(set EFLAGS, (X86ptest VR128:$src1, VR128:$src2))]>,
4730 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
4731 "ptest \t{$src2, $src1|$src1, $src2}",
4732 [(set EFLAGS, (X86ptest VR128:$src1, (load addr:$src2)))]>,
4736 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE41] in
4737 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4738 "vmovntdqa\t{$src, $dst|$dst, $src}",
4739 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4741 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4742 "movntdqa\t{$src, $dst|$dst, $src}",
4743 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4746 //===----------------------------------------------------------------------===//
4747 // SSE4.2 - Compare Instructions
4748 //===----------------------------------------------------------------------===//
4750 /// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
4751 multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
4752 Intrinsic IntId128, bit Is2Addr = 1> {
4753 def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
4754 (ins VR128:$src1, VR128:$src2),
4756 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4757 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4758 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
4760 def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
4761 (ins VR128:$src1, i128mem:$src2),
4763 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4764 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4766 (IntId128 VR128:$src1,
4767 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4770 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE42] in
4771 defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
4773 let Constraints = "$src1 = $dst" in
4774 defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
4776 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
4777 (PCMPGTQrr VR128:$src1, VR128:$src2)>;
4778 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
4779 (PCMPGTQrm VR128:$src1, addr:$src2)>;
4781 //===----------------------------------------------------------------------===//
4782 // SSE4.2 - String/text Processing Instructions
4783 //===----------------------------------------------------------------------===//
4785 // Packed Compare Implicit Length Strings, Return Mask
4786 let Defs = [EFLAGS], usesCustomInserter = 1 in {
4787 def PCMPISTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst),
4788 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4789 "#PCMPISTRM128rr PSEUDO!",
4790 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
4791 imm:$src3))]>, OpSize;
4792 def PCMPISTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst),
4793 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4794 "#PCMPISTRM128rm PSEUDO!",
4795 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
4796 VR128:$src1, (load addr:$src2), imm:$src3))]>, OpSize;
4799 let Defs = [XMM0, EFLAGS], isAsmParserOnly = 1,
4800 Predicates = [HasAVX, HasSSE42] in {
4801 def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4802 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4803 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4804 def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4805 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4806 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4809 let Defs = [XMM0, EFLAGS] in {
4810 def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4811 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4812 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4813 def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4814 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4815 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4818 // Packed Compare Explicit Length Strings, Return Mask
4819 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
4820 def PCMPESTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst),
4821 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4822 "#PCMPESTRM128rr PSEUDO!",
4824 (int_x86_sse42_pcmpestrm128
4825 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>, OpSize;
4827 def PCMPESTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst),
4828 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4829 "#PCMPESTRM128rm PSEUDO!",
4830 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4831 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>,
4835 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE42],
4836 Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4837 def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4838 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4839 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4840 def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4841 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4842 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4845 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4846 def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4847 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4848 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4849 def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4850 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4851 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4854 // Packed Compare Implicit Length Strings, Return Index
4855 let Defs = [ECX, EFLAGS] in {
4856 multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
4857 def rr : SS42AI<0x63, MRMSrcReg, (outs),
4858 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4859 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4860 [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
4861 (implicit EFLAGS)]>, OpSize;
4862 def rm : SS42AI<0x63, MRMSrcMem, (outs),
4863 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4864 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4865 [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
4866 (implicit EFLAGS)]>, OpSize;
4870 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE42] in {
4871 defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
4873 defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
4875 defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
4877 defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
4879 defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
4881 defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
4885 defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
4886 defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
4887 defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
4888 defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
4889 defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
4890 defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
4892 // Packed Compare Explicit Length Strings, Return Index
4893 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
4894 multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
4895 def rr : SS42AI<0x61, MRMSrcReg, (outs),
4896 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4897 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
4898 [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
4899 (implicit EFLAGS)]>, OpSize;
4900 def rm : SS42AI<0x61, MRMSrcMem, (outs),
4901 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4902 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
4904 (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
4905 (implicit EFLAGS)]>, OpSize;
4909 let isAsmParserOnly = 1, Predicates = [HasAVX, HasSSE42] in {
4910 defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
4912 defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
4914 defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
4916 defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
4918 defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
4920 defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
4924 defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
4925 defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
4926 defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
4927 defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
4928 defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
4929 defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
4931 //===----------------------------------------------------------------------===//
4932 // SSE4.2 - CRC Instructions
4933 //===----------------------------------------------------------------------===//
4935 // No CRC instructions have AVX equivalents
4937 // crc intrinsic instruction
4938 // This set of instructions are only rm, the only difference is the size
4940 let Constraints = "$src1 = $dst" in {
4941 def CRC32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
4942 (ins GR32:$src1, i8mem:$src2),
4943 "crc32{b} \t{$src2, $src1|$src1, $src2}",
4945 (int_x86_sse42_crc32_8 GR32:$src1,
4946 (load addr:$src2)))]>;
4947 def CRC32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
4948 (ins GR32:$src1, GR8:$src2),
4949 "crc32{b} \t{$src2, $src1|$src1, $src2}",
4951 (int_x86_sse42_crc32_8 GR32:$src1, GR8:$src2))]>;
4952 def CRC32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
4953 (ins GR32:$src1, i16mem:$src2),
4954 "crc32{w} \t{$src2, $src1|$src1, $src2}",
4956 (int_x86_sse42_crc32_16 GR32:$src1,
4957 (load addr:$src2)))]>,
4959 def CRC32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
4960 (ins GR32:$src1, GR16:$src2),
4961 "crc32{w} \t{$src2, $src1|$src1, $src2}",
4963 (int_x86_sse42_crc32_16 GR32:$src1, GR16:$src2))]>,
4965 def CRC32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
4966 (ins GR32:$src1, i32mem:$src2),
4967 "crc32{l} \t{$src2, $src1|$src1, $src2}",
4969 (int_x86_sse42_crc32_32 GR32:$src1,
4970 (load addr:$src2)))]>;
4971 def CRC32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
4972 (ins GR32:$src1, GR32:$src2),
4973 "crc32{l} \t{$src2, $src1|$src1, $src2}",
4975 (int_x86_sse42_crc32_32 GR32:$src1, GR32:$src2))]>;
4976 def CRC64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
4977 (ins GR64:$src1, i8mem:$src2),
4978 "crc32{b} \t{$src2, $src1|$src1, $src2}",
4980 (int_x86_sse42_crc64_8 GR64:$src1,
4981 (load addr:$src2)))]>,
4983 def CRC64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
4984 (ins GR64:$src1, GR8:$src2),
4985 "crc32{b} \t{$src2, $src1|$src1, $src2}",
4987 (int_x86_sse42_crc64_8 GR64:$src1, GR8:$src2))]>,
4989 def CRC64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
4990 (ins GR64:$src1, i64mem:$src2),
4991 "crc32{q} \t{$src2, $src1|$src1, $src2}",
4993 (int_x86_sse42_crc64_64 GR64:$src1,
4994 (load addr:$src2)))]>,
4996 def CRC64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
4997 (ins GR64:$src1, GR64:$src2),
4998 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5000 (int_x86_sse42_crc64_64 GR64:$src1, GR64:$src2))]>,
5004 //===----------------------------------------------------------------------===//
5005 // AES-NI Instructions
5006 //===----------------------------------------------------------------------===//
5008 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
5009 Intrinsic IntId128, bit Is2Addr = 1> {
5010 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
5011 (ins VR128:$src1, VR128:$src2),
5013 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5014 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5015 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5017 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
5018 (ins VR128:$src1, i128mem:$src2),
5020 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5021 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5023 (IntId128 VR128:$src1,
5024 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5027 // Perform One Round of an AES Encryption/Decryption Flow
5028 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5029 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
5030 int_x86_aesni_aesenc, 0>, VEX_4V;
5031 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
5032 int_x86_aesni_aesenclast, 0>, VEX_4V;
5033 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
5034 int_x86_aesni_aesdec, 0>, VEX_4V;
5035 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
5036 int_x86_aesni_aesdeclast, 0>, VEX_4V;
5039 let Constraints = "$src1 = $dst" in {
5040 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
5041 int_x86_aesni_aesenc>;
5042 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
5043 int_x86_aesni_aesenclast>;
5044 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
5045 int_x86_aesni_aesdec>;
5046 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
5047 int_x86_aesni_aesdeclast>;
5050 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
5051 (AESENCrr VR128:$src1, VR128:$src2)>;
5052 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
5053 (AESENCrm VR128:$src1, addr:$src2)>;
5054 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
5055 (AESENCLASTrr VR128:$src1, VR128:$src2)>;
5056 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
5057 (AESENCLASTrm VR128:$src1, addr:$src2)>;
5058 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
5059 (AESDECrr VR128:$src1, VR128:$src2)>;
5060 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
5061 (AESDECrm VR128:$src1, addr:$src2)>;
5062 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
5063 (AESDECLASTrr VR128:$src1, VR128:$src2)>;
5064 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
5065 (AESDECLASTrm VR128:$src1, addr:$src2)>;
5067 // Perform the AES InvMixColumn Transformation
5068 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5069 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5071 "vaesimc\t{$src1, $dst|$dst, $src1}",
5073 (int_x86_aesni_aesimc VR128:$src1))]>,
5075 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5076 (ins i128mem:$src1),
5077 "vaesimc\t{$src1, $dst|$dst, $src1}",
5079 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5082 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5084 "aesimc\t{$src1, $dst|$dst, $src1}",
5086 (int_x86_aesni_aesimc VR128:$src1))]>,
5088 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5089 (ins i128mem:$src1),
5090 "aesimc\t{$src1, $dst|$dst, $src1}",
5092 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5095 // AES Round Key Generation Assist
5096 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5097 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5098 (ins VR128:$src1, i8imm:$src2),
5099 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5101 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5103 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5104 (ins i128mem:$src1, i8imm:$src2),
5105 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5107 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5111 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5112 (ins VR128:$src1, i8imm:$src2),
5113 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5115 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5117 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5118 (ins i128mem:$src1, i8imm:$src2),
5119 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5121 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),