1 //====- X86InstrSSE.td - Describe the X86 Instruction Set -------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Evan Cheng and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
16 //===----------------------------------------------------------------------===//
17 // SSE specific DAG Nodes.
18 //===----------------------------------------------------------------------===//
// SSE-specific SelectionDAG node definitions.  Each def names the
// X86ISD::* opcode produced by X86 DAG lowering and gives its type
// profile and node properties.
// NOTE(review): the property-list lines of X86loadp, X86loadu,
// X86comi and X86ucomi are missing from this view of the file; each
// of those defs ends mid-argument-list.
20 def X86loadp : SDNode<"X86ISD::LOAD_PACK", SDTLoad,
22 def X86loadu : SDNode<"X86ISD::LOAD_UA", SDTLoad,
// Bitwise FP AND/XOR nodes: flagged commutative and associative so
// the DAG combiner may freely reassociate them.
24 def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp,
25 [SDNPCommutative, SDNPAssociative]>;
26 def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp,
27 [SDNPCommutative, SDNPAssociative]>;
// Scalar FP compares (COMISS/UCOMISS style) using the common X86
// compare/test type profile.
28 def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest,
30 def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest,
// Scalar-to-vector, extract-word and insert-word nodes.  The empty
// constraint lists in the SDTypeProfiles leave operand/result types
// unconstrained; patterns supply the concrete types.
32 def X86s2vec : SDNode<"X86ISD::S2VEC",
33 SDTypeProfile<1, 1, []>, []>;
34 def X86pextrw : SDNode<"X86ISD::PEXTRW",
35 SDTypeProfile<1, 2, []>, []>;
36 def X86pinsrw : SDNode<"X86ISD::PINSRW",
37 SDTypeProfile<1, 3, []>, []>;
39 //===----------------------------------------------------------------------===//
40 // SSE pattern fragments
41 //===----------------------------------------------------------------------===//
// Typed wrappers around the X86loadp node so patterns can select on
// the scalar result type (f32 vs f64).
43 def X86loadpf32 : PatFrag<(ops node:$ptr), (f32 (X86loadp node:$ptr))>;
44 def X86loadpf64 : PatFrag<(ops node:$ptr), (f64 (X86loadp node:$ptr))>;
// Typed 128-bit vector loads, one fragment per legal SSE vector type.
46 def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
47 def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
48 def loadv16i8 : PatFrag<(ops node:$ptr), (v16i8 (load node:$ptr))>;
49 def loadv8i16 : PatFrag<(ops node:$ptr), (v8i16 (load node:$ptr))>;
50 def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
51 def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
// Bitcast fragments: reinterpret a 128-bit value as another vector
// type without changing any bits.
53 def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
54 def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
55 def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
56 def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
57 def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
58 def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
// fp32imm0 matches only the f32 immediate +0.0 (used by the FsFLD0SS
// pxor alias below).
// NOTE(review): the closing "}]>;" lines of both defs here are
// missing from this view of the file.
60 def fp32imm0 : PatLeaf<(f32 fpimm), [{
61 return N->isExactlyValue(+0.0);
// PSxLDQ_imm converts a byte count into the 16-byte-granule count
// expected by PSLLDQ/PSRLDQ-style encodings.
64 def PSxLDQ_imm : SDNodeXForm<imm, [{
65 // Transformation function: imm >> 3
66 return getI32Imm(N->getValue() >> 3);
// Immediate-extraction transforms: each converts a vector_shuffle
// build_vector mask node into the 8-bit immediate encoding used by
// the corresponding PSHUF*/SHUFP* instruction.
// NOTE(review): the closing "}]>;" lines of the three xforms are
// missing from this view of the file.
69 // SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
71 def SHUFFLE_get_shuf_imm : SDNodeXForm<build_vector, [{
72 return getI8Imm(X86::getShuffleSHUFImmediate(N));
75 // SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
77 def SHUFFLE_get_pshufhw_imm : SDNodeXForm<build_vector, [{
78 return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
81 // SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
83 def SHUFFLE_get_pshuflw_imm : SDNodeXForm<build_vector, [{
84 return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
// Shuffle-mask predicates.  Each PatLeaf matches a build_vector
// shuffle mask via an X86::is*Mask() recognizer; PatLeafs with a
// trailing xform also produce the instruction's 8-bit immediate.
// NOTE(review): several defs below are missing their closing
// "}], ...>;" lines in this view of the file.
87 def SSE_splat_mask : PatLeaf<(build_vector), [{
88 return X86::isSplatMask(N);
89 }], SHUFFLE_get_shuf_imm>;
// v2 splat variant -- same recognizer as SSE_splat_mask.
91 def SSE_splat_v2_mask : PatLeaf<(build_vector), [{
92 return X86::isSplatMask(N);
95 def MOVHLPS_shuffle_mask : PatLeaf<(build_vector), [{
96 return X86::isMOVHLPSMask(N);
99 def MOVHP_shuffle_mask : PatLeaf<(build_vector), [{
100 return X86::isMOVHPMask(N);
103 def MOVLP_shuffle_mask : PatLeaf<(build_vector), [{
104 return X86::isMOVLPMask(N);
107 def MOVL_shuffle_mask : PatLeaf<(build_vector), [{
108 return X86::isMOVLMask(N);
111 def MOVSHDUP_shuffle_mask : PatLeaf<(build_vector), [{
112 return X86::isMOVSHDUPMask(N);
115 def MOVSLDUP_shuffle_mask : PatLeaf<(build_vector), [{
116 return X86::isMOVSLDUPMask(N);
119 def UNPCKL_shuffle_mask : PatLeaf<(build_vector), [{
120 return X86::isUNPCKLMask(N);
123 def UNPCKH_shuffle_mask : PatLeaf<(build_vector), [{
124 return X86::isUNPCKHMask(N);
127 def UNPCKL_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
128 return X86::isUNPCKL_v_undef_Mask(N);
131 def PSHUFD_shuffle_mask : PatLeaf<(build_vector), [{
132 return X86::isPSHUFDMask(N);
133 }], SHUFFLE_get_shuf_imm>;
135 def PSHUFHW_shuffle_mask : PatLeaf<(build_vector), [{
136 return X86::isPSHUFHWMask(N);
137 }], SHUFFLE_get_pshufhw_imm>;
139 def PSHUFLW_shuffle_mask : PatLeaf<(build_vector), [{
140 return X86::isPSHUFLWMask(N);
141 }], SHUFFLE_get_pshuflw_imm>;
// SHUFP used as a single-input shuffle reuses the PSHUFD recognizer
// (a unary SHUFPS mask has the same shape as a PSHUFD mask).
143 def SHUFP_unary_shuffle_mask : PatLeaf<(build_vector), [{
144 return X86::isPSHUFDMask(N);
145 }], SHUFFLE_get_shuf_imm>;
147 def SHUFP_shuffle_mask : PatLeaf<(build_vector), [{
148 return X86::isSHUFPMask(N);
149 }], SHUFFLE_get_shuf_imm>;
// Conversely, a two-input PSHUFD-style shuffle reuses the SHUFP
// recognizer.
151 def PSHUFD_binary_shuffle_mask : PatLeaf<(build_vector), [{
152 return X86::isSHUFPMask(N);
153 }], SHUFFLE_get_shuf_imm>;
155 //===----------------------------------------------------------------------===//
156 // SSE scalar FP Instructions
157 //===----------------------------------------------------------------------===//
159 // Instruction templates
160 // SSI - SSE1 instructions with XS prefix.
161 // SDI - SSE2 instructions with XD prefix.
162 // PSI - SSE1 instructions with TB prefix.
163 // PDI - SSE2 instructions with TB and OpSize prefixes.
164 // PSIi8 - SSE1 instructions with ImmT == Imm8 and TB prefix.
165 // PDIi8 - SSE2 instructions with ImmT == Imm8 and TB and OpSize prefixes.
166 // S3I - SSE3 instructions with TB and OpSize prefixes.
167 // S3SI - SSE3 instructions with XS prefix.
168 // S3DI - SSE3 instructions with XD prefix.
// Instruction format base classes.  Each pairs an encoding prefix
// (XS = F3, XD = F2, TB = 0F, OpSize = 66) with the subtarget
// predicate that guards selection; see the comment block above for
// the naming scheme.
169 class SSI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
170 : I<o, F, ops, asm, pattern>, XS, Requires<[HasSSE1]>;
171 class SDI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
172 : I<o, F, ops, asm, pattern>, XD, Requires<[HasSSE2]>;
173 class PSI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
174 : I<o, F, ops, asm, pattern>, TB, Requires<[HasSSE1]>;
175 class PDI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
176 : I<o, F, ops, asm, pattern>, TB, OpSize, Requires<[HasSSE2]>;
// Ii8 variants carry an 8-bit immediate operand in the encoding.
177 class PSIi8<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
178 : Ii8<o, F, ops, asm, pattern>, TB, Requires<[HasSSE1]>;
179 class PDIi8<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
180 : Ii8<o, F, ops, asm, pattern>, TB, OpSize, Requires<[HasSSE2]>;
// SSE3 variants: same prefixes, gated on HasSSE3.
182 class S3SI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
183 : I<o, F, ops, asm, pattern>, XS, Requires<[HasSSE3]>;
184 class S3DI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
185 : I<o, F, ops, asm, pattern>, XD, Requires<[HasSSE3]>;
186 class S3I<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
187 : I<o, F, ops, asm, pattern>, TB, OpSize, Requires<[HasSSE3]>;
189 //===----------------------------------------------------------------------===//
190 // Helpers for defining instructions that directly correspond to intrinsics.
// Helper classes for defining instructions whose selection pattern is
// a single intrinsic call.  Naming scheme:
//   *_Intr / *_Intm   -- unary op, register / memory source
//   *_Intrr / *_Intrm -- binary op, register / memory second source
// SS/SD prefixes are the scalar single/double forms (result typed
// v4f32/v2f64); PS/PD are the packed forms; S3D/S3 are SSE3 forms.
191 class SS_Intr<bits<8> o, string asm, Intrinsic IntId>
192 : SSI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src), asm,
193 [(set VR128:$dst, (v4f32 (IntId VR128:$src)))]>;
194 class SS_Intm<bits<8> o, string asm, Intrinsic IntId>
195 : SSI<o, MRMSrcMem, (ops VR128:$dst, f32mem:$src), asm,
196 [(set VR128:$dst, (v4f32 (IntId (load addr:$src))))]>;
197 class SD_Intr<bits<8> o, string asm, Intrinsic IntId>
198 : SDI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src), asm,
199 [(set VR128:$dst, (v2f64 (IntId VR128:$src)))]>;
200 class SD_Intm<bits<8> o, string asm, Intrinsic IntId>
201 : SDI<o, MRMSrcMem, (ops VR128:$dst, f64mem:$src), asm,
202 [(set VR128:$dst, (v2f64 (IntId (load addr:$src))))]>;
// Binary scalar forms; used inside "let isTwoAddress = 1" regions so
// $src1 is tied to $dst.
204 class SS_Intrr<bits<8> o, string asm, Intrinsic IntId>
205 : SSI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2), asm,
206 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
207 class SS_Intrm<bits<8> o, string asm, Intrinsic IntId>
208 : SSI<o, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f32mem:$src2), asm,
209 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, (load addr:$src2))))]>;
210 class SD_Intrr<bits<8> o, string asm, Intrinsic IntId>
211 : SDI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2), asm,
212 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
213 class SD_Intrm<bits<8> o, string asm, Intrinsic IntId>
214 : SDI<o, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2), asm,
215 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, (load addr:$src2)))]>;
// Packed unary forms; the memory variants use the typed vector load
// fragments so the intrinsic sees a correctly typed operand.
217 class PS_Intr<bits<8> o, string asm, Intrinsic IntId>
218 : PSI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src), asm,
219 [(set VR128:$dst, (IntId VR128:$src))]>;
220 class PS_Intm<bits<8> o, string asm, Intrinsic IntId>
221 : PSI<o, MRMSrcMem, (ops VR128:$dst, f32mem:$src), asm,
222 [(set VR128:$dst, (IntId (loadv4f32 addr:$src)))]>;
223 class PD_Intr<bits<8> o, string asm, Intrinsic IntId>
224 : PDI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src), asm,
225 [(set VR128:$dst, (IntId VR128:$src))]>;
226 class PD_Intm<bits<8> o, string asm, Intrinsic IntId>
227 : PDI<o, MRMSrcMem, (ops VR128:$dst, f64mem:$src), asm,
228 [(set VR128:$dst, (IntId (loadv2f64 addr:$src)))]>;
// Packed binary forms.
230 class PS_Intrr<bits<8> o, string asm, Intrinsic IntId>
231 : PSI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2), asm,
232 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
233 class PS_Intrm<bits<8> o, string asm, Intrinsic IntId>
234 : PSI<o, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f32mem:$src2), asm,
235 [(set VR128:$dst, (IntId VR128:$src1, (loadv4f32 addr:$src2)))]>;
236 class PD_Intrr<bits<8> o, string asm, Intrinsic IntId>
237 : PDI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2), asm,
238 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
239 class PD_Intrm<bits<8> o, string asm, Intrinsic IntId>
240 : PDI<o, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2), asm,
241 [(set VR128:$dst, (IntId VR128:$src1, (loadv2f64 addr:$src2)))]>;
// SSE3 binary forms (haddps/hsubpd etc. use 128-bit memory operands).
243 class S3D_Intrr<bits<8> o, string asm, Intrinsic IntId>
244 : S3DI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2), asm,
245 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
246 class S3D_Intrm<bits<8> o, string asm, Intrinsic IntId>
247 : S3DI<o, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2), asm,
248 [(set VR128:$dst, (v4f32 (IntId VR128:$src1,
249 (loadv4f32 addr:$src2))))]>;
250 class S3_Intrr<bits<8> o, string asm, Intrinsic IntId>
251 : S3I<o, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2), asm,
252 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
253 class S3_Intrm<bits<8> o, string asm, Intrinsic IntId>
254 : S3I<o, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2), asm,
255 [(set VR128:$dst, (v2f64 (IntId VR128:$src1,
256 (loadv2f64 addr:$src2))))]>;
258 // Some 'special' instructions
// Pseudo instructions materializing an undefined value in an SSE
// scalar register (selected for ISD::UNDEF).
// BUGFIX: the FR32 variant previously required HasSSE2, but FR32
// (single-precision scalar SSE) only needs SSE1; requiring SSE2 made
// IMPLICIT_DEF of f32 unselectable on SSE1-only subtargets.
259 def IMPLICIT_DEF_FR32 : I<0, Pseudo, (ops FR32:$dst),
260 "#IMPLICIT_DEF $dst",
261 [(set FR32:$dst, (undef))]>, Requires<[HasSSE1]>;
// FR64 genuinely requires SSE2 (double-precision scalar SSE).
262 def IMPLICIT_DEF_FR64 : I<0, Pseudo, (ops FR64:$dst),
263 "#IMPLICIT_DEF $dst",
264 [(set FR64:$dst, (undef))]>, Requires<[HasSSE2]>;
266 // CMOV* - Used to implement the SSE SELECT DAG operation. Expanded by the
267 // scheduler into a branch sequence.
// Pseudo CMOVs for FP/vector types: X86cmov is lowered by a custom
// inserter into a compare-and-branch diamond, since SSE has no
// register conditional move.
// NOTE(review): the "[(set VR128:$dst," pattern lines of the three
// vector variants, and the closing "}" of this let region, are
// missing from this view of the file.
268 let usesCustomDAGSchedInserter = 1 in {  // Expanded by the scheduler.
269 def CMOV_FR32 : I<0, Pseudo,
270 (ops FR32:$dst, FR32:$t, FR32:$f, i8imm:$cond),
271 "#CMOV_FR32 PSEUDO!",
272 [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond))]>;
273 def CMOV_FR64 : I<0, Pseudo,
274 (ops FR64:$dst, FR64:$t, FR64:$f, i8imm:$cond),
275 "#CMOV_FR64 PSEUDO!",
276 [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond))]>;
277 def CMOV_V4F32 : I<0, Pseudo,
278 (ops VR128:$dst, VR128:$t, VR128:$f, i8imm:$cond),
279 "#CMOV_V4F32 PSEUDO!",
281 (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond)))]>;
282 def CMOV_V2F64 : I<0, Pseudo,
283 (ops VR128:$dst, VR128:$t, VR128:$f, i8imm:$cond),
284 "#CMOV_V2F64 PSEUDO!",
286 (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond)))]>;
287 def CMOV_V2I64 : I<0, Pseudo,
288 (ops VR128:$dst, VR128:$t, VR128:$f, i8imm:$cond),
289 "#CMOV_V2I64 PSEUDO!",
291 (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond)))]>;
// Scalar single/double moves.  The reg-reg forms have empty patterns
// (selected as copies); the load/store forms match plain f32/f64
// loads and stores.
295 def MOVSSrr : SSI<0x10, MRMSrcReg, (ops FR32:$dst, FR32:$src),
296 "movss {$src, $dst|$dst, $src}", []>;
297 def MOVSSrm : SSI<0x10, MRMSrcMem, (ops FR32:$dst, f32mem:$src),
298 "movss {$src, $dst|$dst, $src}",
299 [(set FR32:$dst, (loadf32 addr:$src))]>;
300 def MOVSDrr : SDI<0x10, MRMSrcReg, (ops FR64:$dst, FR64:$src),
301 "movsd {$src, $dst|$dst, $src}", []>;
302 def MOVSDrm : SDI<0x10, MRMSrcMem, (ops FR64:$dst, f64mem:$src),
303 "movsd {$src, $dst|$dst, $src}",
304 [(set FR64:$dst, (loadf64 addr:$src))]>;
// Stores (opcode 0x11 is the store direction of MOVSS/MOVSD).
306 def MOVSSmr : SSI<0x11, MRMDestMem, (ops f32mem:$dst, FR32:$src),
307 "movss {$src, $dst|$dst, $src}",
308 [(store FR32:$src, addr:$dst)]>;
309 def MOVSDmr : SDI<0x11, MRMDestMem, (ops f64mem:$dst, FR64:$src),
310 "movsd {$src, $dst|$dst, $src}",
311 [(store FR64:$src, addr:$dst)]>;
313 // Arithmetic instructions
// Scalar FP add/mul/div/sub.  All are two-address ($src1 tied to
// $dst); add and mul are additionally commutable so the allocator may
// swap operands.
// NOTE(review): the "}" closing the isCommutable region (between the
// MULSD and ADDSSrm defs) and the "}" closing the isTwoAddress region
// (after SUBSDrm) are missing from this view of the file.
314 let isTwoAddress = 1 in {
315 let isCommutable = 1 in {
316 def ADDSSrr : SSI<0x58, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
317 "addss {$src2, $dst|$dst, $src2}",
318 [(set FR32:$dst, (fadd FR32:$src1, FR32:$src2))]>;
319 def ADDSDrr : SDI<0x58, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
320 "addsd {$src2, $dst|$dst, $src2}",
321 [(set FR64:$dst, (fadd FR64:$src1, FR64:$src2))]>;
322 def MULSSrr : SSI<0x59, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
323 "mulss {$src2, $dst|$dst, $src2}",
324 [(set FR32:$dst, (fmul FR32:$src1, FR32:$src2))]>;
325 def MULSDrr : SDI<0x59, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
326 "mulsd {$src2, $dst|$dst, $src2}",
327 [(set FR64:$dst, (fmul FR64:$src1, FR64:$src2))]>;
// Memory-operand forms fold the load into the arithmetic op.
330 def ADDSSrm : SSI<0x58, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
331 "addss {$src2, $dst|$dst, $src2}",
332 [(set FR32:$dst, (fadd FR32:$src1, (loadf32 addr:$src2)))]>;
333 def ADDSDrm : SDI<0x58, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
334 "addsd {$src2, $dst|$dst, $src2}",
335 [(set FR64:$dst, (fadd FR64:$src1, (loadf64 addr:$src2)))]>;
336 def MULSSrm : SSI<0x59, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
337 "mulss {$src2, $dst|$dst, $src2}",
338 [(set FR32:$dst, (fmul FR32:$src1, (loadf32 addr:$src2)))]>;
339 def MULSDrm : SDI<0x59, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
340 "mulsd {$src2, $dst|$dst, $src2}",
341 [(set FR64:$dst, (fmul FR64:$src1, (loadf64 addr:$src2)))]>;
// Division and subtraction are not commutable.
343 def DIVSSrr : SSI<0x5E, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
344 "divss {$src2, $dst|$dst, $src2}",
345 [(set FR32:$dst, (fdiv FR32:$src1, FR32:$src2))]>;
346 def DIVSSrm : SSI<0x5E, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
347 "divss {$src2, $dst|$dst, $src2}",
348 [(set FR32:$dst, (fdiv FR32:$src1, (loadf32 addr:$src2)))]>;
349 def DIVSDrr : SDI<0x5E, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
350 "divsd {$src2, $dst|$dst, $src2}",
351 [(set FR64:$dst, (fdiv FR64:$src1, FR64:$src2))]>;
352 def DIVSDrm : SDI<0x5E, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
353 "divsd {$src2, $dst|$dst, $src2}",
354 [(set FR64:$dst, (fdiv FR64:$src1, (loadf64 addr:$src2)))]>;
356 def SUBSSrr : SSI<0x5C, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
357 "subss {$src2, $dst|$dst, $src2}",
358 [(set FR32:$dst, (fsub FR32:$src1, FR32:$src2))]>;
359 def SUBSSrm : SSI<0x5C, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
360 "subss {$src2, $dst|$dst, $src2}",
361 [(set FR32:$dst, (fsub FR32:$src1, (loadf32 addr:$src2)))]>;
362 def SUBSDrr : SDI<0x5C, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
363 "subsd {$src2, $dst|$dst, $src2}",
364 [(set FR64:$dst, (fsub FR64:$src1, FR64:$src2))]>;
365 def SUBSDrm : SDI<0x5C, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
366 "subsd {$src2, $dst|$dst, $src2}",
367 [(set FR64:$dst, (fsub FR64:$src1, (loadf64 addr:$src2)))]>;
// Scalar square root: unary, so not in a two-address region.
370 def SQRTSSr : SSI<0x51, MRMSrcReg, (ops FR32:$dst, FR32:$src),
371 "sqrtss {$src, $dst|$dst, $src}",
372 [(set FR32:$dst, (fsqrt FR32:$src))]>;
373 def SQRTSSm : SSI<0x51, MRMSrcMem, (ops FR32:$dst, f32mem:$src),
374 "sqrtss {$src, $dst|$dst, $src}",
375 [(set FR32:$dst, (fsqrt (loadf32 addr:$src)))]>;
376 def SQRTSDr : SDI<0x51, MRMSrcReg, (ops FR64:$dst, FR64:$src),
377 "sqrtsd {$src, $dst|$dst, $src}",
378 [(set FR64:$dst, (fsqrt FR64:$src))]>;
379 def SQRTSDm : SDI<0x51, MRMSrcMem, (ops FR64:$dst, f64mem:$src),
380 "sqrtsd {$src, $dst|$dst, $src}",
381 [(set FR64:$dst, (fsqrt (loadf64 addr:$src)))]>;
// Reciprocal-estimate instructions have empty patterns here: they are
// approximations, so they are only reachable through the Int_RSQRTSS/
// Int_RCPSS intrinsic aliases defined further down.
383 def RSQRTSSr : SSI<0x52, MRMSrcReg, (ops FR32:$dst, FR32:$src),
384 "rsqrtss {$src, $dst|$dst, $src}", []>;
385 def RSQRTSSm : SSI<0x52, MRMSrcMem, (ops FR32:$dst, f32mem:$src),
386 "rsqrtss {$src, $dst|$dst, $src}", []>;
387 def RCPSSr : SSI<0x53, MRMSrcReg, (ops FR32:$dst, FR32:$src),
388 "rcpss {$src, $dst|$dst, $src}", []>;
389 def RCPSSm : SSI<0x53, MRMSrcMem, (ops FR32:$dst, f32mem:$src),
390 "rcpss {$src, $dst|$dst, $src}", []>;
// Scalar FP min/max.  Patterns are empty here; these are selected via
// the Int_MAX*/Int_MIN* intrinsic aliases below.
// BUGFIX: the four *SD variants declared $src1 as FR32.  With
// isTwoAddress = 1, $src1 is tied to the FR64 destination, so the
// double-precision forms must use FR64:$src1 (copy-paste from the SS
// defs).  Fixed to FR64 below.
// NOTE(review): the "}" closing the isCommutable region (before
// MAXSSrm) and the "}" closing the isTwoAddress region (after
// MINSDrm) are missing from this view of the file.
392 let isTwoAddress = 1 in {
393 let isCommutable = 1 in {
394 def MAXSSrr : SSI<0x5F, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
395 "maxss {$src2, $dst|$dst, $src2}", []>;
396 def MAXSDrr : SDI<0x5F, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
397 "maxsd {$src2, $dst|$dst, $src2}", []>;
398 def MINSSrr : SSI<0x5D, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
399 "minss {$src2, $dst|$dst, $src2}", []>;
400 def MINSDrr : SDI<0x5D, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
401 "minsd {$src2, $dst|$dst, $src2}", []>;
403 def MAXSSrm : SSI<0x5F, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
404 "maxss {$src2, $dst|$dst, $src2}", []>;
405 def MAXSDrm : SDI<0x5F, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
406 "maxsd {$src2, $dst|$dst, $src2}", []>;
407 def MINSSrm : SSI<0x5D, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
408 "minss {$src2, $dst|$dst, $src2}", []>;
409 def MINSDrm : SDI<0x5D, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
410 "minsd {$src2, $dst|$dst, $src2}", []>;
413 // Aliases to match intrinsics which expect XMM operand(s).
// Intrinsic aliases: same opcodes as the FR32/FR64 scalar arithmetic
// above, but operating on whole XMM registers so that the
// llvm.x86.sse*.* scalar intrinsics (which take/return v4f32/v2f64)
// can be matched directly.
// NOTE(review): many continuation lines naming the SSE1 intrinsics
// (e.g. int_x86_sse_add_ss) and several closing "}" lines of the let
// regions are missing from this view of the file.
414 let isTwoAddress = 1 in {
415 let isCommutable = 1 in {
416 def Int_ADDSSrr : SS_Intrr<0x58, "addss {$src2, $dst|$dst, $src2}",
418 def Int_ADDSDrr : SD_Intrr<0x58, "addsd {$src2, $dst|$dst, $src2}",
419 int_x86_sse2_add_sd>;
420 def Int_MULSSrr : SS_Intrr<0x59, "mulss {$src2, $dst|$dst, $src2}",
422 def Int_MULSDrr : SD_Intrr<0x59, "mulsd {$src2, $dst|$dst, $src2}",
423 int_x86_sse2_mul_sd>;
426 def Int_ADDSSrm : SS_Intrm<0x58, "addss {$src2, $dst|$dst, $src2}",
428 def Int_ADDSDrm : SD_Intrm<0x58, "addsd {$src2, $dst|$dst, $src2}",
429 int_x86_sse2_add_sd>;
430 def Int_MULSSrm : SS_Intrm<0x59, "mulss {$src2, $dst|$dst, $src2}",
432 def Int_MULSDrm : SD_Intrm<0x59, "mulsd {$src2, $dst|$dst, $src2}",
433 int_x86_sse2_mul_sd>;
435 def Int_DIVSSrr : SS_Intrr<0x5E, "divss {$src2, $dst|$dst, $src2}",
437 def Int_DIVSSrm : SS_Intrm<0x5E, "divss {$src2, $dst|$dst, $src2}",
439 def Int_DIVSDrr : SD_Intrr<0x5E, "divsd {$src2, $dst|$dst, $src2}",
440 int_x86_sse2_div_sd>;
441 def Int_DIVSDrm : SD_Intrm<0x5E, "divsd {$src2, $dst|$dst, $src2}",
442 int_x86_sse2_div_sd>;
444 def Int_SUBSSrr : SS_Intrr<0x5C, "subss {$src2, $dst|$dst, $src2}",
446 def Int_SUBSSrm : SS_Intrm<0x5C, "subss {$src2, $dst|$dst, $src2}",
448 def Int_SUBSDrr : SD_Intrr<0x5C, "subsd {$src2, $dst|$dst, $src2}",
449 int_x86_sse2_sub_sd>;
450 def Int_SUBSDrm : SD_Intrm<0x5C, "subsd {$src2, $dst|$dst, $src2}",
451 int_x86_sse2_sub_sd>;
// Unary intrinsic forms (sqrt, and the estimate instructions, which
// are ONLY reachable through these intrinsic defs).
454 def Int_SQRTSSr : SS_Intr<0x51, "sqrtss {$src, $dst|$dst, $src}",
455 int_x86_sse_sqrt_ss>;
456 def Int_SQRTSSm : SS_Intm<0x51, "sqrtss {$src, $dst|$dst, $src}",
457 int_x86_sse_sqrt_ss>;
458 def Int_SQRTSDr : SD_Intr<0x51, "sqrtsd {$src, $dst|$dst, $src}",
459 int_x86_sse2_sqrt_sd>;
460 def Int_SQRTSDm : SD_Intm<0x51, "sqrtsd {$src, $dst|$dst, $src}",
461 int_x86_sse2_sqrt_sd>;
463 def Int_RSQRTSSr : SS_Intr<0x52, "rsqrtss {$src, $dst|$dst, $src}",
464 int_x86_sse_rsqrt_ss>;
465 def Int_RSQRTSSm : SS_Intm<0x52, "rsqrtss {$src, $dst|$dst, $src}",
466 int_x86_sse_rsqrt_ss>;
467 def Int_RCPSSr : SS_Intr<0x53, "rcpss {$src, $dst|$dst, $src}",
469 def Int_RCPSSm : SS_Intm<0x53, "rcpss {$src, $dst|$dst, $src}",
// Min/max intrinsic forms (two-address, commutable reg-reg variants).
472 let isTwoAddress = 1 in {
473 let isCommutable = 1 in {
474 def Int_MAXSSrr : SS_Intrr<0x5F, "maxss {$src2, $dst|$dst, $src2}",
476 def Int_MAXSDrr : SD_Intrr<0x5F, "maxsd {$src2, $dst|$dst, $src2}",
477 int_x86_sse2_max_sd>;
478 def Int_MINSSrr : SS_Intrr<0x5D, "minss {$src2, $dst|$dst, $src2}",
480 def Int_MINSDrr : SD_Intrr<0x5D, "minsd {$src2, $dst|$dst, $src2}",
481 int_x86_sse2_min_sd>;
483 def Int_MAXSSrm : SS_Intrm<0x5F, "maxss {$src2, $dst|$dst, $src2}",
485 def Int_MAXSDrm : SD_Intrm<0x5F, "maxsd {$src2, $dst|$dst, $src2}",
486 int_x86_sse2_max_sd>;
487 def Int_MINSSrm : SS_Intrm<0x5D, "minss {$src2, $dst|$dst, $src2}",
489 def Int_MINSDrm : SD_Intrm<0x5D, "minsd {$src2, $dst|$dst, $src2}",
490 int_x86_sse2_min_sd>;
493 // Conversion instructions
// Scalar conversions.  CVTT* forms truncate (match fp_to_sint);
// CVTSD2SS matches fround (f64->f32); CVTSI2* match sint_to_fp.
494 def CVTTSS2SIrr: SSI<0x2C, MRMSrcReg, (ops GR32:$dst, FR32:$src),
495 "cvttss2si {$src, $dst|$dst, $src}",
496 [(set GR32:$dst, (fp_to_sint FR32:$src))]>;
497 def CVTTSS2SIrm: SSI<0x2C, MRMSrcMem, (ops GR32:$dst, f32mem:$src),
498 "cvttss2si {$src, $dst|$dst, $src}",
499 [(set GR32:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
500 def CVTTSD2SIrr: SDI<0x2C, MRMSrcReg, (ops GR32:$dst, FR64:$src),
501 "cvttsd2si {$src, $dst|$dst, $src}",
502 [(set GR32:$dst, (fp_to_sint FR64:$src))]>;
503 def CVTTSD2SIrm: SDI<0x2C, MRMSrcMem, (ops GR32:$dst, f64mem:$src),
504 "cvttsd2si {$src, $dst|$dst, $src}",
505 [(set GR32:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
506 def CVTSD2SSrr: SDI<0x5A, MRMSrcReg, (ops FR32:$dst, FR64:$src),
507 "cvtsd2ss {$src, $dst|$dst, $src}",
508 [(set FR32:$dst, (fround FR64:$src))]>;
509 def CVTSD2SSrm: SDI<0x5A, MRMSrcMem, (ops FR32:$dst, f64mem:$src),
510 "cvtsd2ss {$src, $dst|$dst, $src}",
511 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>;
512 def CVTSI2SSrr: SSI<0x2A, MRMSrcReg, (ops FR32:$dst, GR32:$src),
513 "cvtsi2ss {$src, $dst|$dst, $src}",
514 [(set FR32:$dst, (sint_to_fp GR32:$src))]>;
515 def CVTSI2SSrm: SSI<0x2A, MRMSrcMem, (ops FR32:$dst, i32mem:$src),
516 "cvtsi2ss {$src, $dst|$dst, $src}",
517 [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
518 def CVTSI2SDrr: SDI<0x2A, MRMSrcReg, (ops FR64:$dst, GR32:$src),
519 "cvtsi2sd {$src, $dst|$dst, $src}",
520 [(set FR64:$dst, (sint_to_fp GR32:$src))]>;
521 def CVTSI2SDrm: SDI<0x2A, MRMSrcMem, (ops FR64:$dst, i32mem:$src),
522 "cvtsi2sd {$src, $dst|$dst, $src}",
523 [(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
525 // SSE2 instructions with XS prefix
// CVTSS2SD uses the raw I class with an explicit XS prefix because it
// is an SSE2 instruction encoded with the (normally SSE1) XS prefix,
// so the SSI class's HasSSE1 predicate would be wrong.
// NOTE(review): the trailing "Requires<[HasSSE2]>;" continuation
// lines of both defs are missing from this view of the file.
526 def CVTSS2SDrr: I<0x5A, MRMSrcReg, (ops FR64:$dst, FR32:$src),
527 "cvtss2sd {$src, $dst|$dst, $src}",
528 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
530 def CVTSS2SDrm: I<0x5A, MRMSrcMem, (ops FR64:$dst, f32mem:$src),
531 "cvtss2sd {$src, $dst|$dst, $src}",
532 [(set FR64:$dst, (extload addr:$src, f32))]>, XS,
535 // Match intrinsics which expect XMM operand(s).
// Conversion intrinsic aliases operating on XMM registers, matching
// the llvm.x86.sse*.cvt* intrinsics directly.
536 def Int_CVTSS2SIrr: SSI<0x2D, MRMSrcReg, (ops GR32:$dst, VR128:$src),
537 "cvtss2si {$src, $dst|$dst, $src}",
538 [(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>;
539 def Int_CVTSS2SIrm: SSI<0x2D, MRMSrcMem, (ops GR32:$dst, f32mem:$src),
540 "cvtss2si {$src, $dst|$dst, $src}",
541 [(set GR32:$dst, (int_x86_sse_cvtss2si
542 (loadv4f32 addr:$src)))]>;
543 def Int_CVTSD2SIrr: SDI<0x2D, MRMSrcReg, (ops GR32:$dst, VR128:$src),
544 "cvtsd2si {$src, $dst|$dst, $src}",
545 [(set GR32:$dst, (int_x86_sse2_cvtsd2si VR128:$src))]>;
546 def Int_CVTSD2SIrm: SDI<0x2D, MRMSrcMem, (ops GR32:$dst, f128mem:$src),
547 "cvtsd2si {$src, $dst|$dst, $src}",
548 [(set GR32:$dst, (int_x86_sse2_cvtsd2si
549 (loadv2f64 addr:$src)))]>;
// Truncating variants (opcode 0x2C).
551 // Aliases for intrinsics
552 def Int_CVTTSS2SIrr: SSI<0x2C, MRMSrcReg, (ops GR32:$dst, VR128:$src),
553 "cvttss2si {$src, $dst|$dst, $src}",
554 [(set GR32:$dst, (int_x86_sse_cvttss2si VR128:$src))]>;
555 def Int_CVTTSS2SIrm: SSI<0x2C, MRMSrcMem, (ops GR32:$dst, f32mem:$src),
556 "cvttss2si {$src, $dst|$dst, $src}",
557 [(set GR32:$dst, (int_x86_sse_cvttss2si
558 (loadv4f32 addr:$src)))]>;
559 def Int_CVTTSD2SIrr: SDI<0x2C, MRMSrcReg, (ops GR32:$dst, VR128:$src),
560 "cvttsd2si {$src, $dst|$dst, $src}",
561 [(set GR32:$dst, (int_x86_sse2_cvttsd2si VR128:$src))]>;
562 def Int_CVTTSD2SIrm: SDI<0x2C, MRMSrcMem, (ops GR32:$dst, f128mem:$src),
563 "cvttsd2si {$src, $dst|$dst, $src}",
564 [(set GR32:$dst, (int_x86_sse2_cvttsd2si
565 (loadv2f64 addr:$src)))]>;
// cvtsi2ss intrinsic keeps the upper XMM elements of $src1, hence the
// two-address form.
// NOTE(review): the continuation line closing the Int_CVTSI2SSrr
// pattern and the "}" ending this let region are missing from this
// view of the file.
567 let isTwoAddress = 1 in {
568 def Int_CVTSI2SSrr: SSI<0x2A, MRMSrcReg,
569 (ops VR128:$dst, VR128:$src1, GR32:$src2),
570 "cvtsi2ss {$src2, $dst|$dst, $src2}",
571 [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
573 def Int_CVTSI2SSrm: SSI<0x2A, MRMSrcMem,
574 (ops VR128:$dst, VR128:$src1, i32mem:$src2),
575 "cvtsi2ss {$src2, $dst|$dst, $src2}",
576 [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
577 (loadi32 addr:$src2)))]>;
580 // Comparison instructions
// CMPSS/CMPSD: compare with condition code encoded as the SSECC
// immediate operand ($cc is printed into the mnemonic).
// NOTE(review): the pattern line of CMPSSrr and the "}" closing this
// let region are missing from this view of the file.
581 let isTwoAddress = 1 in {
582 def CMPSSrr : SSI<0xC2, MRMSrcReg,
583 (ops FR32:$dst, FR32:$src1, FR32:$src, SSECC:$cc),
584 "cmp${cc}ss {$src, $dst|$dst, $src}",
586 def CMPSSrm : SSI<0xC2, MRMSrcMem,
587 (ops FR32:$dst, FR32:$src1, f32mem:$src, SSECC:$cc),
588 "cmp${cc}ss {$src, $dst|$dst, $src}", []>;
589 def CMPSDrr : SDI<0xC2, MRMSrcReg,
590 (ops FR64:$dst, FR64:$src1, FR64:$src, SSECC:$cc),
591 "cmp${cc}sd {$src, $dst|$dst, $src}", []>;
592 def CMPSDrm : SDI<0xC2, MRMSrcMem,
593 (ops FR64:$dst, FR64:$src1, f64mem:$src, SSECC:$cc),
594 "cmp${cc}sd {$src, $dst|$dst, $src}", []>;
// UCOMISS/UCOMISD set EFLAGS from a scalar compare; matched from the
// generic X86cmp node.  Note AT&T/Intel operand order differs, hence
// the {|} asm variant syntax.
597 def UCOMISSrr: PSI<0x2E, MRMSrcReg, (ops FR32:$src1, FR32:$src2),
598 "ucomiss {$src2, $src1|$src1, $src2}",
599 [(X86cmp FR32:$src1, FR32:$src2)]>;
600 def UCOMISSrm: PSI<0x2E, MRMSrcMem, (ops FR32:$src1, f32mem:$src2),
601 "ucomiss {$src2, $src1|$src1, $src2}",
602 [(X86cmp FR32:$src1, (loadf32 addr:$src2))]>;
603 def UCOMISDrr: PDI<0x2E, MRMSrcReg, (ops FR64:$src1, FR64:$src2),
604 "ucomisd {$src2, $src1|$src1, $src2}",
605 [(X86cmp FR64:$src1, FR64:$src2)]>;
606 def UCOMISDrm: PDI<0x2E, MRMSrcMem, (ops FR64:$src1, f64mem:$src2),
607 "ucomisd {$src2, $src1|$src1, $src2}",
608 [(X86cmp FR64:$src1, (loadf64 addr:$src2))]>;
610 // Aliases to match intrinsics which expect XMM operand(s).
// XMM-operand aliases of the compares, matching the cmp/comi/ucomi
// intrinsics (which take full v4f32/v2f64 vectors).
// NOTE(review): the "}" closing this let region is missing from this
// view of the file.
611 let isTwoAddress = 1 in {
612 def Int_CMPSSrr : SSI<0xC2, MRMSrcReg,
613 (ops VR128:$dst, VR128:$src1, VR128:$src, SSECC:$cc),
614 "cmp${cc}ss {$src, $dst|$dst, $src}",
615 [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
616 VR128:$src, imm:$cc))]>;
617 def Int_CMPSSrm : SSI<0xC2, MRMSrcMem,
618 (ops VR128:$dst, VR128:$src1, f32mem:$src, SSECC:$cc),
619 "cmp${cc}ss {$src, $dst|$dst, $src}",
620 [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
621 (load addr:$src), imm:$cc))]>;
622 def Int_CMPSDrr : SDI<0xC2, MRMSrcReg,
623 (ops VR128:$dst, VR128:$src1, VR128:$src, SSECC:$cc),
624 "cmp${cc}sd {$src, $dst|$dst, $src}", []>;
625 def Int_CMPSDrm : SDI<0xC2, MRMSrcMem,
626 (ops VR128:$dst, VR128:$src1, f64mem:$src, SSECC:$cc),
627 "cmp${cc}sd {$src, $dst|$dst, $src}", []>;
// Unordered compares matched from the X86ucomi node.
630 def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (ops VR128:$src1, VR128:$src2),
631 "ucomiss {$src2, $src1|$src1, $src2}",
632 [(X86ucomi (v4f32 VR128:$src1), VR128:$src2)]>;
633 def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (ops VR128:$src1, f128mem:$src2),
634 "ucomiss {$src2, $src1|$src1, $src2}",
635 [(X86ucomi (v4f32 VR128:$src1), (loadv4f32 addr:$src2))]>;
636 def Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (ops VR128:$src1, VR128:$src2),
637 "ucomisd {$src2, $src1|$src1, $src2}",
638 [(X86ucomi (v2f64 VR128:$src1), (v2f64 VR128:$src2))]>;
639 def Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (ops VR128:$src1, f128mem:$src2),
640 "ucomisd {$src2, $src1|$src1, $src2}",
641 [(X86ucomi (v2f64 VR128:$src1), (loadv2f64 addr:$src2))]>;
// Ordered compares (COMISS/COMISD, opcode 0x2F) from the X86comi node.
643 def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (ops VR128:$src1, VR128:$src2),
644 "comiss {$src2, $src1|$src1, $src2}",
645 [(X86comi (v4f32 VR128:$src1), VR128:$src2)]>;
646 def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (ops VR128:$src1, f128mem:$src2),
647 "comiss {$src2, $src1|$src1, $src2}",
648 [(X86comi (v4f32 VR128:$src1), (loadv4f32 addr:$src2))]>;
649 def Int_COMISDrr: PDI<0x2F, MRMSrcReg, (ops VR128:$src1, VR128:$src2),
650 "comisd {$src2, $src1|$src1, $src2}",
651 [(X86comi (v2f64 VR128:$src1), (v2f64 VR128:$src2))]>;
652 def Int_COMISDrm: PDI<0x2F, MRMSrcMem, (ops VR128:$src1, f128mem:$src2),
653 "comisd {$src2, $src1|$src1, $src2}",
654 [(X86comi (v2f64 VR128:$src1), (loadv2f64 addr:$src2))]>;
656 // Aliases of packed instructions for scalar use. These all have names that start with "Fs".
659 // Alias instructions that map fld0 to pxor for sse.
660 // FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// Materialize FP +0.0 as "pxor reg, reg" (66 0F EF with MRMInitReg).
// NOTE(review): pxor is an SSE2 instruction, yet FsFLD0SS is gated on
// HasSSE1 only -- confirm this is acceptable for SSE1-only targets.
661 def FsFLD0SS : I<0xEF, MRMInitReg, (ops FR32:$dst),
662 "pxor $dst, $dst", [(set FR32:$dst, fp32imm0)]>,
663 Requires<[HasSSE1]>, TB, OpSize;
664 def FsFLD0SD : I<0xEF, MRMInitReg, (ops FR64:$dst),
665 "pxor $dst, $dst", [(set FR64:$dst, fp64imm0)]>,
666 Requires<[HasSSE2]>, TB, OpSize;
668 // Alias instructions to do FR32 / FR64 reg-to-reg copy using movaps / movapd.
669 // Upper bits are disregarded.
// Reg-reg copies of scalar values using the packed move encodings;
// upper XMM lanes are don't-care.
670 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (ops FR32:$dst, FR32:$src),
671 "movaps {$src, $dst|$dst, $src}", []>;
672 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (ops FR64:$dst, FR64:$src),
673 "movapd {$src, $dst|$dst, $src}", []>;
// 128-bit aligned loads whose scalar element is all the pattern
// cares about (X86loadpf32/64 wrap the X86ISD::LOAD_PACK node).
677 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (ops FR32:$dst, f128mem:$src),
678 "movaps {$src, $dst|$dst, $src}",
679 [(set FR32:$dst, (X86loadpf32 addr:$src))]>;
680 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (ops FR64:$dst, f128mem:$src),
681 "movapd {$src, $dst|$dst, $src}",
682 [(set FR64:$dst, (X86loadpf64 addr:$src))]>;
684 // Alias bitwise logical operations using SSE logical ops on packed FP values.
// Scalar bitwise logic implemented with the packed logical opcodes,
// matched from the X86fand/X86fxor nodes (used for fabs/fneg/copysign
// style lowering).  OR and ANDN forms have no patterns here.
// NOTE(review): the "}" closing the isCommutable region (before
// FsANDPSrm) and the "}" closing the isTwoAddress region (after
// FsANDNPDrm) are missing from this view of the file.
685 let isTwoAddress = 1 in {
686 let isCommutable = 1 in {
687 def FsANDPSrr : PSI<0x54, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
688 "andps {$src2, $dst|$dst, $src2}",
689 [(set FR32:$dst, (X86fand FR32:$src1, FR32:$src2))]>;
690 def FsANDPDrr : PDI<0x54, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
691 "andpd {$src2, $dst|$dst, $src2}",
692 [(set FR64:$dst, (X86fand FR64:$src1, FR64:$src2))]>;
693 def FsORPSrr : PSI<0x56, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
694 "orps {$src2, $dst|$dst, $src2}", []>;
695 def FsORPDrr : PDI<0x56, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
696 "orpd {$src2, $dst|$dst, $src2}", []>;
697 def FsXORPSrr : PSI<0x57, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
698 "xorps {$src2, $dst|$dst, $src2}",
699 [(set FR32:$dst, (X86fxor FR32:$src1, FR32:$src2))]>;
700 def FsXORPDrr : PDI<0x57, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
701 "xorpd {$src2, $dst|$dst, $src2}",
702 [(set FR64:$dst, (X86fxor FR64:$src1, FR64:$src2))]>;
// Memory forms fold a 128-bit packed load of the constant operand.
704 def FsANDPSrm : PSI<0x54, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f128mem:$src2),
705 "andps {$src2, $dst|$dst, $src2}",
706 [(set FR32:$dst, (X86fand FR32:$src1,
707 (X86loadpf32 addr:$src2)))]>;
708 def FsANDPDrm : PDI<0x54, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f128mem:$src2),
709 "andpd {$src2, $dst|$dst, $src2}",
710 [(set FR64:$dst, (X86fand FR64:$src1,
711 (X86loadpf64 addr:$src2)))]>;
712 def FsORPSrm : PSI<0x56, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f128mem:$src2),
713 "orps {$src2, $dst|$dst, $src2}", []>;
714 def FsORPDrm : PDI<0x56, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f128mem:$src2),
715 "orpd {$src2, $dst|$dst, $src2}", []>;
716 def FsXORPSrm : PSI<0x57, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f128mem:$src2),
717 "xorps {$src2, $dst|$dst, $src2}",
718 [(set FR32:$dst, (X86fxor FR32:$src1,
719 (X86loadpf32 addr:$src2)))]>;
720 def FsXORPDrm : PDI<0x57, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f128mem:$src2),
721 "xorpd {$src2, $dst|$dst, $src2}",
722 [(set FR64:$dst, (X86fxor FR64:$src1,
723 (X86loadpf64 addr:$src2)))]>;
// AND-NOT forms (no selection patterns).
725 def FsANDNPSrr : PSI<0x55, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
726 "andnps {$src2, $dst|$dst, $src2}", []>;
727 def FsANDNPSrm : PSI<0x55, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f128mem:$src2),
728 "andnps {$src2, $dst|$dst, $src2}", []>;
729 def FsANDNPDrr : PDI<0x55, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
730 "andnpd {$src2, $dst|$dst, $src2}", []>;
731 def FsANDNPDrm : PDI<0x55, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f128mem:$src2),
732 "andnpd {$src2, $dst|$dst, $src2}", []>;
735 //===----------------------------------------------------------------------===//
736 // SSE packed FP Instructions
737 //===----------------------------------------------------------------------===//
739 // Some 'special' instructions
740 def IMPLICIT_DEF_VR128 : I<0, Pseudo, (ops VR128:$dst),
741 "#IMPLICIT_DEF $dst",
742 [(set VR128:$dst, (v4f32 (undef)))]>,
// Aligned 128-bit moves (opcode 0x28 load/reg-reg, 0x29 store).
// The rr forms have empty patterns: plain register copies are not selected
// from the DAG. The rm forms load a full v4f32/v2f64; the mr forms store one.
746 def MOVAPSrr : PSI<0x28, MRMSrcReg, (ops VR128:$dst, VR128:$src),
747 "movaps {$src, $dst|$dst, $src}", []>;
748 def MOVAPSrm : PSI<0x28, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
749 "movaps {$src, $dst|$dst, $src}",
750 [(set VR128:$dst, (loadv4f32 addr:$src))]>;
751 def MOVAPDrr : PDI<0x28, MRMSrcReg, (ops VR128:$dst, VR128:$src),
752 "movapd {$src, $dst|$dst, $src}", []>;
753 def MOVAPDrm : PDI<0x28, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
754 "movapd {$src, $dst|$dst, $src}",
755 [(set VR128:$dst, (loadv2f64 addr:$src))]>;
// Store forms (MRMDestMem): memory operand is the destination.
757 def MOVAPSmr : PSI<0x29, MRMDestMem, (ops f128mem:$dst, VR128:$src),
758 "movaps {$src, $dst|$dst, $src}",
759 [(store (v4f32 VR128:$src), addr:$dst)]>;
760 def MOVAPDmr : PDI<0x29, MRMDestMem, (ops f128mem:$dst, VR128:$src),
761 "movapd {$src, $dst|$dst, $src}",
762 [(store (v2f64 VR128:$src), addr:$dst)]>;
// Unaligned 128-bit moves (opcode 0x10 load/reg-reg, 0x11 store).
// Unlike the MOVAP* defs above, the load/store patterns here are expressed
// through the unaligned-access intrinsics rather than plain load/store nodes,
// so ordinary (alignment-assuming) loads never select them.
764 def MOVUPSrr : PSI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src),
765 "movups {$src, $dst|$dst, $src}", []>;
766 def MOVUPSrm : PSI<0x10, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
767 "movups {$src, $dst|$dst, $src}",
768 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
769 def MOVUPSmr : PSI<0x11, MRMDestMem, (ops f128mem:$dst, VR128:$src),
770 "movups {$src, $dst|$dst, $src}",
771 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
772 def MOVUPDrr : PDI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src),
773 "movupd {$src, $dst|$dst, $src}", []>;
774 def MOVUPDrm : PDI<0x10, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
775 "movupd {$src, $dst|$dst, $src}",
776 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
777 def MOVUPDmr : PDI<0x11, MRMDestMem, (ops f128mem:$dst, VR128:$src),
778 "movupd {$src, $dst|$dst, $src}",
779 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
781 let isTwoAddress = 1 in {
782 let AddedComplexity = 20 in {
783 def MOVLPSrm : PSI<0x12, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2),
784 "movlps {$src2, $dst|$dst, $src2}",
786 (v4f32 (vector_shuffle VR128:$src1,
787 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
788 MOVLP_shuffle_mask)))]>;
789 def MOVLPDrm : PDI<0x12, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2),
790 "movlpd {$src2, $dst|$dst, $src2}",
792 (v2f64 (vector_shuffle VR128:$src1,
793 (scalar_to_vector (loadf64 addr:$src2)),
794 MOVLP_shuffle_mask)))]>;
795 def MOVHPSrm : PSI<0x16, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2),
796 "movhps {$src2, $dst|$dst, $src2}",
798 (v4f32 (vector_shuffle VR128:$src1,
799 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
800 MOVHP_shuffle_mask)))]>;
801 def MOVHPDrm : PDI<0x16, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2),
802 "movhpd {$src2, $dst|$dst, $src2}",
804 (v2f64 (vector_shuffle VR128:$src1,
805 (scalar_to_vector (loadf64 addr:$src2)),
806 MOVHP_shuffle_mask)))]>;
// Store the low 64 bits of a 128-bit register to memory (opcode 0x13).
// Both forms match an f64 extract of element 0; the PS variant first
// bitcasts the v4f32 register to v2f64 so element 0 covers the low qword.
810 def MOVLPSmr : PSI<0x13, MRMDestMem, (ops f64mem:$dst, VR128:$src),
811 "movlps {$src, $dst|$dst, $src}",
812 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
813 (iPTR 0))), addr:$dst)]>;
814 def MOVLPDmr : PDI<0x13, MRMDestMem, (ops f64mem:$dst, VR128:$src),
815 "movlpd {$src, $dst|$dst, $src}",
816 [(store (f64 (vector_extract (v2f64 VR128:$src),
817 (iPTR 0))), addr:$dst)]>;
819 // v2f64 extract element 1 is always custom lowered to unpack high to low
820 // and extract element 0 so the non-store version isn't too horrible.
821 def MOVHPSmr : PSI<0x17, MRMDestMem, (ops f64mem:$dst, VR128:$src),
822 "movhps {$src, $dst|$dst, $src}",
823 [(store (f64 (vector_extract
824 (v2f64 (vector_shuffle
825 (bc_v2f64 (v4f32 VR128:$src)), (undef),
826 UNPCKH_shuffle_mask)), (iPTR 0))),
828 def MOVHPDmr : PDI<0x17, MRMDestMem, (ops f64mem:$dst, VR128:$src),
829 "movhpd {$src, $dst|$dst, $src}",
830 [(store (f64 (vector_extract
831 (v2f64 (vector_shuffle VR128:$src, (undef),
832 UNPCKH_shuffle_mask)), (iPTR 0))),
835 let isTwoAddress = 1 in {
836 let AddedComplexity = 20 in {
837 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
838 "movlhps {$src2, $dst|$dst, $src2}",
840 (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
841 MOVHP_shuffle_mask)))]>;
843 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
844 "movhlps {$src2, $dst|$dst, $src2}",
846 (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
847 MOVHLPS_shuffle_mask)))]>;
851 def MOVSHDUPrr : S3SI<0x16, MRMSrcReg, (ops VR128:$dst, VR128:$src),
852 "movshdup {$src, $dst|$dst, $src}",
853 [(set VR128:$dst, (v4f32 (vector_shuffle
855 MOVSHDUP_shuffle_mask)))]>;
856 def MOVSHDUPrm : S3SI<0x16, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
857 "movshdup {$src, $dst|$dst, $src}",
858 [(set VR128:$dst, (v4f32 (vector_shuffle
859 (loadv4f32 addr:$src), (undef),
860 MOVSHDUP_shuffle_mask)))]>;
862 def MOVSLDUPrr : S3SI<0x12, MRMSrcReg, (ops VR128:$dst, VR128:$src),
863 "movsldup {$src, $dst|$dst, $src}",
864 [(set VR128:$dst, (v4f32 (vector_shuffle
866 MOVSLDUP_shuffle_mask)))]>;
867 def MOVSLDUPrm : S3SI<0x12, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
868 "movsldup {$src, $dst|$dst, $src}",
869 [(set VR128:$dst, (v4f32 (vector_shuffle
870 (loadv4f32 addr:$src), (undef),
871 MOVSLDUP_shuffle_mask)))]>;
873 def MOVDDUPrr : S3DI<0x12, MRMSrcReg, (ops VR128:$dst, VR128:$src),
874 "movddup {$src, $dst|$dst, $src}",
875 [(set VR128:$dst, (v2f64 (vector_shuffle
877 SSE_splat_v2_mask)))]>;
878 def MOVDDUPrm : S3DI<0x12, MRMSrcMem, (ops VR128:$dst, f64mem:$src),
879 "movddup {$src, $dst|$dst, $src}",
880 [(set VR128:$dst, (v2f64 (vector_shuffle
881 (scalar_to_vector (loadf64 addr:$src)),
883 SSE_splat_v2_mask)))]>;
// Intrinsic-matching conversion instructions, int <-> float packed forms.
// Each rr/rm pair maps the corresponding llvm.x86.sse2.* conversion
// intrinsic; the rm forms load their 128-bit source as v2i64 and bitcast to
// v4i32 where the intrinsic expects integer elements.
885 // SSE2 instructions without OpSize prefix
886 def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (ops VR128:$dst, VR128:$src),
887 "cvtdq2ps {$src, $dst|$dst, $src}",
888 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
889 TB, Requires<[HasSSE2]>;
890 def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (ops VR128:$dst, i128mem:$src),
891 "cvtdq2ps {$src, $dst|$dst, $src}",
892 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
893 (bc_v4i32 (loadv2i64 addr:$src))))]>,
894 TB, Requires<[HasSSE2]>;
896 // SSE2 instructions with XS prefix
897 def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (ops VR128:$dst, VR128:$src),
898 "cvtdq2pd {$src, $dst|$dst, $src}",
899 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
900 XS, Requires<[HasSSE2]>;
901 def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (ops VR128:$dst, i64mem:$src),
902 "cvtdq2pd {$src, $dst|$dst, $src}",
903 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
904 (bc_v4i32 (loadv2i64 addr:$src))))]>,
905 XS, Requires<[HasSSE2]>;
907 def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (ops VR128:$dst, VR128:$src),
908 "cvtps2dq {$src, $dst|$dst, $src}",
909 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
910 def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
911 "cvtps2dq {$src, $dst|$dst, $src}",
912 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
913 (loadv4f32 addr:$src)))]>;
// Truncating (round-toward-zero) variant; same opcode 0x5B but XS prefix.
914 // SSE2 packed instructions with XS prefix
915 def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (ops VR128:$dst, VR128:$src),
916 "cvttps2dq {$src, $dst|$dst, $src}",
917 [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))]>,
918 XS, Requires<[HasSSE2]>;
919 def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
920 "cvttps2dq {$src, $dst|$dst, $src}",
921 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
922 (loadv4f32 addr:$src)))]>,
923 XS, Requires<[HasSSE2]>;
// Double -> dword-int conversions (opcode 0xE6): cvtpd2dq rounds per MXCSR
// (XD prefix), cvttpd2dq truncates (OpSize/PDI prefix). rm forms load v2f64.
925 // SSE2 packed instructions with XD prefix
926 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (ops VR128:$dst, VR128:$src),
927 "cvtpd2dq {$src, $dst|$dst, $src}",
928 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
929 XD, Requires<[HasSSE2]>;
930 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
931 "cvtpd2dq {$src, $dst|$dst, $src}",
932 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
933 (loadv2f64 addr:$src)))]>,
934 XD, Requires<[HasSSE2]>;
935 def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (ops VR128:$dst, VR128:$src),
936 "cvttpd2dq {$src, $dst|$dst, $src}",
937 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
938 def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
939 "cvttpd2dq {$src, $dst|$dst, $src}",
940 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
941 (loadv2f64 addr:$src)))]>;
// Widen the low two packed singles to packed doubles, register form.
943 // SSE2 instructions without OpSize prefix
944 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (ops VR128:$dst, VR128:$src),
945 "cvtps2pd {$src, $dst|$dst, $src}",
946 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
947 TB, Requires<[HasSSE2]>;
// Memory form of cvtps2pd: widen two packed singles loaded from memory.
// FIX: this is a load ('rm' suffix, f64mem operand) and must be encoded
// MRMSrcMem, not MRMSrcReg — MRMSrcReg would emit a register ModRM byte and
// mis-encode the instruction. All sibling *rm defs in this file use
// MRMSrcMem.
948 def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (ops VR128:$dst, f64mem:$src),
949 "cvtps2pd {$src, $dst|$dst, $src}",
950 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
951 (loadv4f32 addr:$src)))]>,
952 TB, Requires<[HasSSE2]>;
// Narrow two packed doubles to packed singles, register form.
954 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (ops VR128:$dst, VR128:$src),
955 "cvtpd2ps {$src, $dst|$dst, $src}",
956 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
// Memory form of cvtpd2ps: narrow two packed doubles loaded from memory.
// FIX: 'rm' form with an f128mem operand must be encoded MRMSrcMem, not
// MRMSrcReg (same defect as Int_CVTPS2PDrm); every other *rm def in this
// file uses MRMSrcMem.
957 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
958 "cvtpd2ps {$src, $dst|$dst, $src}",
959 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
960 (loadv2f64 addr:$src)))]>;
962 // Match intrinsics which expect XMM operand(s).
963 // Aliases for intrinsics
964 let isTwoAddress = 1 in {
965 def Int_CVTSI2SDrr: SDI<0x2A, MRMSrcReg,
966 (ops VR128:$dst, VR128:$src1, GR32:$src2),
967 "cvtsi2sd {$src2, $dst|$dst, $src2}",
968 [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
970 def Int_CVTSI2SDrm: SDI<0x2A, MRMSrcMem,
971 (ops VR128:$dst, VR128:$src1, i32mem:$src2),
972 "cvtsi2sd {$src2, $dst|$dst, $src2}",
973 [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
974 (loadi32 addr:$src2)))]>;
975 def Int_CVTSD2SSrr: SDI<0x5A, MRMSrcReg,
976 (ops VR128:$dst, VR128:$src1, VR128:$src2),
977 "cvtsd2ss {$src2, $dst|$dst, $src2}",
978 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
980 def Int_CVTSD2SSrm: SDI<0x5A, MRMSrcMem,
981 (ops VR128:$dst, VR128:$src1, f64mem:$src2),
982 "cvtsd2ss {$src2, $dst|$dst, $src2}",
983 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
984 (loadv2f64 addr:$src2)))]>;
985 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
986 (ops VR128:$dst, VR128:$src1, VR128:$src2),
987 "cvtss2sd {$src2, $dst|$dst, $src2}",
988 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
991 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
992 (ops VR128:$dst, VR128:$src1, f32mem:$src2),
993 "cvtss2sd {$src2, $dst|$dst, $src2}",
994 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
995 (loadv4f32 addr:$src2)))]>, XS,
1000 let isTwoAddress = 1 in {
1001 let isCommutable = 1 in {
1002 def ADDPSrr : PSI<0x58, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1003 "addps {$src2, $dst|$dst, $src2}",
1004 [(set VR128:$dst, (v4f32 (fadd VR128:$src1, VR128:$src2)))]>;
1005 def ADDPDrr : PDI<0x58, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1006 "addpd {$src2, $dst|$dst, $src2}",
1007 [(set VR128:$dst, (v2f64 (fadd VR128:$src1, VR128:$src2)))]>;
1008 def MULPSrr : PSI<0x59, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1009 "mulps {$src2, $dst|$dst, $src2}",
1010 [(set VR128:$dst, (v4f32 (fmul VR128:$src1, VR128:$src2)))]>;
1011 def MULPDrr : PDI<0x59, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1012 "mulpd {$src2, $dst|$dst, $src2}",
1013 [(set VR128:$dst, (v2f64 (fmul VR128:$src1, VR128:$src2)))]>;
1016 def ADDPSrm : PSI<0x58, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1017 "addps {$src2, $dst|$dst, $src2}",
1018 [(set VR128:$dst, (v4f32 (fadd VR128:$src1,
1019 (load addr:$src2))))]>;
1020 def ADDPDrm : PDI<0x58, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1021 "addpd {$src2, $dst|$dst, $src2}",
1022 [(set VR128:$dst, (v2f64 (fadd VR128:$src1,
1023 (load addr:$src2))))]>;
1024 def MULPSrm : PSI<0x59, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1025 "mulps {$src2, $dst|$dst, $src2}",
1026 [(set VR128:$dst, (v4f32 (fmul VR128:$src1,
1027 (load addr:$src2))))]>;
1028 def MULPDrm : PDI<0x59, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1029 "mulpd {$src2, $dst|$dst, $src2}",
1030 [(set VR128:$dst, (v2f64 (fmul VR128:$src1,
1031 (load addr:$src2))))]>;
1033 def DIVPSrr : PSI<0x5E, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1034 "divps {$src2, $dst|$dst, $src2}",
1035 [(set VR128:$dst, (v4f32 (fdiv VR128:$src1, VR128:$src2)))]>;
1036 def DIVPSrm : PSI<0x5E, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1037 "divps {$src2, $dst|$dst, $src2}",
1038 [(set VR128:$dst, (v4f32 (fdiv VR128:$src1,
1039 (load addr:$src2))))]>;
1040 def DIVPDrr : PDI<0x5E, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1041 "divpd {$src2, $dst|$dst, $src2}",
1042 [(set VR128:$dst, (v2f64 (fdiv VR128:$src1, VR128:$src2)))]>;
1043 def DIVPDrm : PDI<0x5E, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1044 "divpd {$src2, $dst|$dst, $src2}",
1045 [(set VR128:$dst, (v2f64 (fdiv VR128:$src1,
1046 (load addr:$src2))))]>;
1048 def SUBPSrr : PSI<0x5C, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1049 "subps {$src2, $dst|$dst, $src2}",
1050 [(set VR128:$dst, (v4f32 (fsub VR128:$src1, VR128:$src2)))]>;
1051 def SUBPSrm : PSI<0x5C, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1052 "subps {$src2, $dst|$dst, $src2}",
1053 [(set VR128:$dst, (v4f32 (fsub VR128:$src1,
1054 (load addr:$src2))))]>;
1055 def SUBPDrr : PDI<0x5C, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1056 "subpd {$src2, $dst|$dst, $src2}",
1057 [(set VR128:$dst, (v2f64 (fsub VR128:$src1, VR128:$src2)))]>;
1058 def SUBPDrm : PDI<0x5C, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1059 "subpd {$src2, $dst|$dst, $src2}",
1060 [(set VR128:$dst, (v2f64 (fsub VR128:$src1,
1061 (load addr:$src2))))]>;
1063 def ADDSUBPSrr : S3DI<0xD0, MRMSrcReg,
1064 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1065 "addsubps {$src2, $dst|$dst, $src2}",
1066 [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
1068 def ADDSUBPSrm : S3DI<0xD0, MRMSrcMem,
1069 (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1070 "addsubps {$src2, $dst|$dst, $src2}",
1071 [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
1072 (loadv4f32 addr:$src2)))]>;
1073 def ADDSUBPDrr : S3I<0xD0, MRMSrcReg,
1074 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1075 "addsubpd {$src2, $dst|$dst, $src2}",
1076 [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
1078 def ADDSUBPDrm : S3I<0xD0, MRMSrcMem,
1079 (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1080 "addsubpd {$src2, $dst|$dst, $src2}",
1081 [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
1082 (loadv2f64 addr:$src2)))]>;
// Packed sqrt (0x51), reciprocal sqrt estimate (0x52) and reciprocal
// estimate (0x53), defined through the PS_Int*/PD_Int* intrinsic-form
// multiclass helpers (declared earlier in this file); the r/m suffix pairs
// are the register and memory source forms. rsqrtps/rcpps are SSE1
// single-precision only — there are no PD variants for 0x52/0x53.
1085 def SQRTPSr : PS_Intr<0x51, "sqrtps {$src, $dst|$dst, $src}",
1086 int_x86_sse_sqrt_ps>;
1087 def SQRTPSm : PS_Intm<0x51, "sqrtps {$src, $dst|$dst, $src}",
1088 int_x86_sse_sqrt_ps>;
1089 def SQRTPDr : PD_Intr<0x51, "sqrtpd {$src, $dst|$dst, $src}",
1090 int_x86_sse2_sqrt_pd>;
1091 def SQRTPDm : PD_Intm<0x51, "sqrtpd {$src, $dst|$dst, $src}",
1092 int_x86_sse2_sqrt_pd>;
1094 def RSQRTPSr : PS_Intr<0x52, "rsqrtps {$src, $dst|$dst, $src}",
1095 int_x86_sse_rsqrt_ps>;
1096 def RSQRTPSm : PS_Intm<0x52, "rsqrtps {$src, $dst|$dst, $src}",
1097 int_x86_sse_rsqrt_ps>;
1098 def RCPPSr : PS_Intr<0x53, "rcpps {$src, $dst|$dst, $src}",
1099 int_x86_sse_rcp_ps>;
1100 def RCPPSm : PS_Intm<0x53, "rcpps {$src, $dst|$dst, $src}",
1101 int_x86_sse_rcp_ps>;
1103 let isTwoAddress = 1 in {
1104 let isCommutable = 1 in {
1105 def MAXPSrr : PS_Intrr<0x5F, "maxps {$src2, $dst|$dst, $src2}",
1106 int_x86_sse_max_ps>;
1107 def MAXPDrr : PD_Intrr<0x5F, "maxpd {$src2, $dst|$dst, $src2}",
1108 int_x86_sse2_max_pd>;
1109 def MINPSrr : PS_Intrr<0x5D, "minps {$src2, $dst|$dst, $src2}",
1110 int_x86_sse_min_ps>;
1111 def MINPDrr : PD_Intrr<0x5D, "minpd {$src2, $dst|$dst, $src2}",
1112 int_x86_sse2_min_pd>;
1114 def MAXPSrm : PS_Intrm<0x5F, "maxps {$src2, $dst|$dst, $src2}",
1115 int_x86_sse_max_ps>;
1116 def MAXPDrm : PD_Intrm<0x5F, "maxpd {$src2, $dst|$dst, $src2}",
1117 int_x86_sse2_max_pd>;
1118 def MINPSrm : PS_Intrm<0x5D, "minps {$src2, $dst|$dst, $src2}",
1119 int_x86_sse_min_ps>;
1120 def MINPDrm : PD_Intrm<0x5D, "minpd {$src2, $dst|$dst, $src2}",
1121 int_x86_sse2_min_pd>;
1125 let isTwoAddress = 1 in {
1126 let isCommutable = 1 in {
1127 def ANDPSrr : PSI<0x54, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1128 "andps {$src2, $dst|$dst, $src2}",
1129 [(set VR128:$dst, (v2i64 (and VR128:$src1, VR128:$src2)))]>;
1130 def ANDPDrr : PDI<0x54, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1131 "andpd {$src2, $dst|$dst, $src2}",
1133 (and (bc_v2i64 (v2f64 VR128:$src1)),
1134 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1135 def ORPSrr : PSI<0x56, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1136 "orps {$src2, $dst|$dst, $src2}",
1137 [(set VR128:$dst, (v2i64 (or VR128:$src1, VR128:$src2)))]>;
1138 def ORPDrr : PDI<0x56, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1139 "orpd {$src2, $dst|$dst, $src2}",
1141 (or (bc_v2i64 (v2f64 VR128:$src1)),
1142 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1143 def XORPSrr : PSI<0x57, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1144 "xorps {$src2, $dst|$dst, $src2}",
1145 [(set VR128:$dst, (v2i64 (xor VR128:$src1, VR128:$src2)))]>;
1146 def XORPDrr : PDI<0x57, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1147 "xorpd {$src2, $dst|$dst, $src2}",
1149 (xor (bc_v2i64 (v2f64 VR128:$src1)),
1150 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1152 def ANDPSrm : PSI<0x54, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1153 "andps {$src2, $dst|$dst, $src2}",
1154 [(set VR128:$dst, (and VR128:$src1,
1155 (bc_v2i64 (loadv4f32 addr:$src2))))]>;
1156 def ANDPDrm : PDI<0x54, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1157 "andpd {$src2, $dst|$dst, $src2}",
1159 (and (bc_v2i64 (v2f64 VR128:$src1)),
1160 (bc_v2i64 (loadv2f64 addr:$src2))))]>;
1161 def ORPSrm : PSI<0x56, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1162 "orps {$src2, $dst|$dst, $src2}",
1163 [(set VR128:$dst, (or VR128:$src1,
1164 (bc_v2i64 (loadv4f32 addr:$src2))))]>;
1165 def ORPDrm : PDI<0x56, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1166 "orpd {$src2, $dst|$dst, $src2}",
1168 (or (bc_v2i64 (v2f64 VR128:$src1)),
1169 (bc_v2i64 (loadv2f64 addr:$src2))))]>;
1170 def XORPSrm : PSI<0x57, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1171 "xorps {$src2, $dst|$dst, $src2}",
1172 [(set VR128:$dst, (xor VR128:$src1,
1173 (bc_v2i64 (loadv4f32 addr:$src2))))]>;
1174 def XORPDrm : PDI<0x57, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1175 "xorpd {$src2, $dst|$dst, $src2}",
1177 (xor (bc_v2i64 (v2f64 VR128:$src1)),
1178 (bc_v2i64 (loadv2f64 addr:$src2))))]>;
1179 def ANDNPSrr : PSI<0x55, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1180 "andnps {$src2, $dst|$dst, $src2}",
1181 [(set VR128:$dst, (v2i64 (and (xor VR128:$src1,
1182 (bc_v2i64 (v4i32 immAllOnesV))),
1184 def ANDNPSrm : PSI<0x55, MRMSrcMem, (ops VR128:$dst, VR128:$src1,f128mem:$src2),
1185 "andnps {$src2, $dst|$dst, $src2}",
1186 [(set VR128:$dst, (v2i64 (and (xor VR128:$src1,
1187 (bc_v2i64 (v4i32 immAllOnesV))),
1188 (bc_v2i64 (loadv4f32 addr:$src2)))))]>;
1189 def ANDNPDrr : PDI<0x55, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1190 "andnpd {$src2, $dst|$dst, $src2}",
1192 (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1193 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1194 def ANDNPDrm : PDI<0x55, MRMSrcMem, (ops VR128:$dst, VR128:$src1,f128mem:$src2),
1195 "andnpd {$src2, $dst|$dst, $src2}",
1197 (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1198 (bc_v2i64 (loadv2f64 addr:$src2))))]>;
// Packed compares (opcode 0xC2). The condition code is an immediate operand
// ($cc, SSECC operand class) spliced into the mnemonic via ${cc}; patterns
// match the cmp_ps/cmp_pd intrinsics with the condition as imm:$cc.
1201 let isTwoAddress = 1 in {
1202 def CMPPSrri : PSIi8<0xC2, MRMSrcReg,
1203 (ops VR128:$dst, VR128:$src1, VR128:$src, SSECC:$cc),
1204 "cmp${cc}ps {$src, $dst|$dst, $src}",
1205 [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
1206 VR128:$src, imm:$cc))]>;
1207 def CMPPSrmi : PSIi8<0xC2, MRMSrcMem,
1208 (ops VR128:$dst, VR128:$src1, f128mem:$src, SSECC:$cc),
1209 "cmp${cc}ps {$src, $dst|$dst, $src}",
1210 [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
1211 (load addr:$src), imm:$cc))]>;
1212 def CMPPDrri : PDIi8<0xC2, MRMSrcReg,
1213 (ops VR128:$dst, VR128:$src1, VR128:$src, SSECC:$cc),
1214 "cmp${cc}pd {$src, $dst|$dst, $src}",
1215 [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
1216 VR128:$src, imm:$cc))]>;
1217 def CMPPDrmi : PDIi8<0xC2, MRMSrcMem,
1218 (ops VR128:$dst, VR128:$src1, f128mem:$src, SSECC:$cc),
1219 "cmp${cc}pd {$src, $dst|$dst, $src}",
1220 [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
1221 (load addr:$src), imm:$cc))]>;
1224 // Shuffle and unpack instructions
1225 let isTwoAddress = 1 in {
1226 let isConvertibleToThreeAddress = 1 in // Convert to pshufd
1227 def SHUFPSrri : PSIi8<0xC6, MRMSrcReg,
1228 (ops VR128:$dst, VR128:$src1, VR128:$src2, i32i8imm:$src3),
1229 "shufps {$src3, $src2, $dst|$dst, $src2, $src3}",
1230 [(set VR128:$dst, (v4f32 (vector_shuffle
1231 VR128:$src1, VR128:$src2,
1232 SHUFP_shuffle_mask:$src3)))]>;
1233 def SHUFPSrmi : PSIi8<0xC6, MRMSrcMem,
1234 (ops VR128:$dst, VR128:$src1, f128mem:$src2, i32i8imm:$src3),
1235 "shufps {$src3, $src2, $dst|$dst, $src2, $src3}",
1236 [(set VR128:$dst, (v4f32 (vector_shuffle
1237 VR128:$src1, (load addr:$src2),
1238 SHUFP_shuffle_mask:$src3)))]>;
1239 def SHUFPDrri : PDIi8<0xC6, MRMSrcReg,
1240 (ops VR128:$dst, VR128:$src1, VR128:$src2, i8imm:$src3),
1241 "shufpd {$src3, $src2, $dst|$dst, $src2, $src3}",
1242 [(set VR128:$dst, (v2f64 (vector_shuffle
1243 VR128:$src1, VR128:$src2,
1244 SHUFP_shuffle_mask:$src3)))]>;
1245 def SHUFPDrmi : PDIi8<0xC6, MRMSrcMem,
1246 (ops VR128:$dst, VR128:$src1, f128mem:$src2, i8imm:$src3),
1247 "shufpd {$src3, $src2, $dst|$dst, $src2, $src3}",
1248 [(set VR128:$dst, (v2f64 (vector_shuffle
1249 VR128:$src1, (load addr:$src2),
1250 SHUFP_shuffle_mask:$src3)))]>;
1252 let AddedComplexity = 10 in {
1253 def UNPCKHPSrr : PSI<0x15, MRMSrcReg,
1254 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1255 "unpckhps {$src2, $dst|$dst, $src2}",
1256 [(set VR128:$dst, (v4f32 (vector_shuffle
1257 VR128:$src1, VR128:$src2,
1258 UNPCKH_shuffle_mask)))]>;
1259 def UNPCKHPSrm : PSI<0x15, MRMSrcMem,
1260 (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1261 "unpckhps {$src2, $dst|$dst, $src2}",
1262 [(set VR128:$dst, (v4f32 (vector_shuffle
1263 VR128:$src1, (load addr:$src2),
1264 UNPCKH_shuffle_mask)))]>;
1265 def UNPCKHPDrr : PDI<0x15, MRMSrcReg,
1266 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1267 "unpckhpd {$src2, $dst|$dst, $src2}",
1268 [(set VR128:$dst, (v2f64 (vector_shuffle
1269 VR128:$src1, VR128:$src2,
1270 UNPCKH_shuffle_mask)))]>;
1271 def UNPCKHPDrm : PDI<0x15, MRMSrcMem,
1272 (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1273 "unpckhpd {$src2, $dst|$dst, $src2}",
1274 [(set VR128:$dst, (v2f64 (vector_shuffle
1275 VR128:$src1, (load addr:$src2),
1276 UNPCKH_shuffle_mask)))]>;
1278 def UNPCKLPSrr : PSI<0x14, MRMSrcReg,
1279 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1280 "unpcklps {$src2, $dst|$dst, $src2}",
1281 [(set VR128:$dst, (v4f32 (vector_shuffle
1282 VR128:$src1, VR128:$src2,
1283 UNPCKL_shuffle_mask)))]>;
1284 def UNPCKLPSrm : PSI<0x14, MRMSrcMem,
1285 (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1286 "unpcklps {$src2, $dst|$dst, $src2}",
1287 [(set VR128:$dst, (v4f32 (vector_shuffle
1288 VR128:$src1, (load addr:$src2),
1289 UNPCKL_shuffle_mask)))]>;
1290 def UNPCKLPDrr : PDI<0x14, MRMSrcReg,
1291 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1292 "unpcklpd {$src2, $dst|$dst, $src2}",
1293 [(set VR128:$dst, (v2f64 (vector_shuffle
1294 VR128:$src1, VR128:$src2,
1295 UNPCKL_shuffle_mask)))]>;
1296 def UNPCKLPDrm : PDI<0x14, MRMSrcMem,
1297 (ops VR128:$dst, VR128:$src1, f128mem:$src2),
1298 "unpcklpd {$src2, $dst|$dst, $src2}",
1299 [(set VR128:$dst, (v2f64 (vector_shuffle
1300 VR128:$src1, (load addr:$src2),
1301 UNPCKL_shuffle_mask)))]>;
1302 } // AddedComplexity
1306 let isTwoAddress = 1 in {
1307 def HADDPSrr : S3D_Intrr<0x7C, "haddps {$src2, $dst|$dst, $src2}",
1308 int_x86_sse3_hadd_ps>;
1309 def HADDPSrm : S3D_Intrm<0x7C, "haddps {$src2, $dst|$dst, $src2}",
1310 int_x86_sse3_hadd_ps>;
1311 def HADDPDrr : S3_Intrr<0x7C, "haddpd {$src2, $dst|$dst, $src2}",
1312 int_x86_sse3_hadd_pd>;
1313 def HADDPDrm : S3_Intrm<0x7C, "haddpd {$src2, $dst|$dst, $src2}",
1314 int_x86_sse3_hadd_pd>;
1315 def HSUBPSrr : S3D_Intrr<0x7D, "hsubps {$src2, $dst|$dst, $src2}",
1316 int_x86_sse3_hsub_ps>;
1317 def HSUBPSrm : S3D_Intrm<0x7D, "hsubps {$src2, $dst|$dst, $src2}",
1318 int_x86_sse3_hsub_ps>;
1319 def HSUBPDrr : S3_Intrr<0x7D, "hsubpd {$src2, $dst|$dst, $src2}",
1320 int_x86_sse3_hsub_pd>;
1321 def HSUBPDrm : S3_Intrm<0x7D, "hsubpd {$src2, $dst|$dst, $src2}",
1322 int_x86_sse3_hsub_pd>;
1325 //===----------------------------------------------------------------------===//
1326 // SSE integer instructions
1327 //===----------------------------------------------------------------------===//
// 128-bit integer moves: aligned (movdqa, plain load/store patterns),
// unaligned (movdqu, via loadu/storeu intrinsics so aligned loads never
// select them), and SSE3 lddqu (unaligned load optimized for cache-line
// splits, via its intrinsic).
1329 // Move Instructions
1330 def MOVDQArr : PDI<0x6F, MRMSrcReg, (ops VR128:$dst, VR128:$src),
1331 "movdqa {$src, $dst|$dst, $src}", []>;
1332 def MOVDQArm : PDI<0x6F, MRMSrcMem, (ops VR128:$dst, i128mem:$src),
1333 "movdqa {$src, $dst|$dst, $src}",
1334 [(set VR128:$dst, (loadv2i64 addr:$src))]>;
1335 def MOVDQAmr : PDI<0x7F, MRMDestMem, (ops i128mem:$dst, VR128:$src),
1336 "movdqa {$src, $dst|$dst, $src}",
1337 [(store (v2i64 VR128:$src), addr:$dst)]>;
1338 def MOVDQUrm : I<0x6F, MRMSrcMem, (ops VR128:$dst, i128mem:$src),
1339 "movdqu {$src, $dst|$dst, $src}",
1340 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
1341 XS, Requires<[HasSSE2]>;
1342 def MOVDQUmr : I<0x7F, MRMDestMem, (ops i128mem:$dst, VR128:$src),
1343 "movdqu {$src, $dst|$dst, $src}",
1344 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
1345 XS, Requires<[HasSSE2]>;
1346 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (ops VR128:$dst, i128mem:$src),
1347 "lddqu {$src, $dst|$dst, $src}",
1348 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
1350 // 128-bit Integer Arithmetic
1351 let isTwoAddress = 1 in {
1352 let isCommutable = 1 in {
1353 def PADDBrr : PDI<0xFC, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1354 "paddb {$src2, $dst|$dst, $src2}",
1355 [(set VR128:$dst, (v16i8 (add VR128:$src1, VR128:$src2)))]>;
1356 def PADDWrr : PDI<0xFD, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1357 "paddw {$src2, $dst|$dst, $src2}",
1358 [(set VR128:$dst, (v8i16 (add VR128:$src1, VR128:$src2)))]>;
1359 def PADDDrr : PDI<0xFE, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1360 "paddd {$src2, $dst|$dst, $src2}",
1361 [(set VR128:$dst, (v4i32 (add VR128:$src1, VR128:$src2)))]>;
1363 def PADDQrr : PDI<0xD4, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1364 "paddq {$src2, $dst|$dst, $src2}",
1365 [(set VR128:$dst, (v2i64 (add VR128:$src1, VR128:$src2)))]>;
1367 def PADDBrm : PDI<0xFC, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1368 "paddb {$src2, $dst|$dst, $src2}",
1369 [(set VR128:$dst, (add VR128:$src1,
1370 (bc_v16i8 (loadv2i64 addr:$src2))))]>;
1371 def PADDWrm : PDI<0xFD, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1372 "paddw {$src2, $dst|$dst, $src2}",
1373 [(set VR128:$dst, (add VR128:$src1,
1374 (bc_v8i16 (loadv2i64 addr:$src2))))]>;
1375 def PADDDrm : PDI<0xFE, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1376 "paddd {$src2, $dst|$dst, $src2}",
1377 [(set VR128:$dst, (add VR128:$src1,
1378 (bc_v4i32 (loadv2i64 addr:$src2))))]>;
// PADDQrm: packed quadword add, memory source (opcode 0xD4, pairs with
// PADDQrr above).
// FIX: the asm string read "paddd" (dword add) — wrong mnemonic for this
// def. The name (PADDQrm), the 0xD4 opcode, and the v2i64 add pattern all
// identify it as paddq, so the assembler text must say "paddq".
1379 def PADDQrm : PDI<0xD4, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1380 "paddq {$src2, $dst|$dst, $src2}",
1381 [(set VR128:$dst, (add VR128:$src1,
1382 (loadv2i64 addr:$src2)))]>;
1384 let isCommutable = 1 in {
1385 def PADDSBrr : PDI<0xEC, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1386 "paddsb {$src2, $dst|$dst, $src2}",
1387 [(set VR128:$dst, (int_x86_sse2_padds_b VR128:$src1,
1389 def PADDSWrr : PDI<0xED, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1390 "paddsw {$src2, $dst|$dst, $src2}",
1391 [(set VR128:$dst, (int_x86_sse2_padds_w VR128:$src1,
1393 def PADDUSBrr : PDI<0xDC, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1394 "paddusb {$src2, $dst|$dst, $src2}",
1395 [(set VR128:$dst, (int_x86_sse2_paddus_b VR128:$src1,
1397 def PADDUSWrr : PDI<0xDD, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1398 "paddusw {$src2, $dst|$dst, $src2}",
1399 [(set VR128:$dst, (int_x86_sse2_paddus_w VR128:$src1,
1402 def PADDSBrm : PDI<0xEC, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1403 "paddsb {$src2, $dst|$dst, $src2}",
1404 [(set VR128:$dst, (int_x86_sse2_padds_b VR128:$src1,
1405 (bc_v16i8 (loadv2i64 addr:$src2))))]>;
1406 def PADDSWrm : PDI<0xED, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1407 "paddsw {$src2, $dst|$dst, $src2}",
1408 [(set VR128:$dst, (int_x86_sse2_padds_w VR128:$src1,
1409 (bc_v8i16 (loadv2i64 addr:$src2))))]>;
1410 def PADDUSBrm : PDI<0xDC, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1411 "paddusb {$src2, $dst|$dst, $src2}",
1412 [(set VR128:$dst, (int_x86_sse2_paddus_b VR128:$src1,
1413 (bc_v16i8 (loadv2i64 addr:$src2))))]>;
1414 def PADDUSWrm : PDI<0xDD, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1415 "paddusw {$src2, $dst|$dst, $src2}",
1416 [(set VR128:$dst, (int_x86_sse2_paddus_w VR128:$src1,
1417 (bc_v8i16 (loadv2i64 addr:$src2))))]>;
1420 def PSUBBrr : PDI<0xF8, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1421 "psubb {$src2, $dst|$dst, $src2}",
1422 [(set VR128:$dst, (v16i8 (sub VR128:$src1, VR128:$src2)))]>;
1423 def PSUBWrr : PDI<0xF9, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1424 "psubw {$src2, $dst|$dst, $src2}",
1425 [(set VR128:$dst, (v8i16 (sub VR128:$src1, VR128:$src2)))]>;
1426 def PSUBDrr : PDI<0xFA, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1427 "psubd {$src2, $dst|$dst, $src2}",
1428 [(set VR128:$dst, (v4i32 (sub VR128:$src1, VR128:$src2)))]>;
1429 def PSUBQrr : PDI<0xFB, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1430 "psubq {$src2, $dst|$dst, $src2}",
1431 [(set VR128:$dst, (v2i64 (sub VR128:$src1, VR128:$src2)))]>;
1433 def PSUBBrm : PDI<0xF8, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1434 "psubb {$src2, $dst|$dst, $src2}",
1435 [(set VR128:$dst, (sub VR128:$src1,
1436 (bc_v16i8 (loadv2i64 addr:$src2))))]>;
1437 def PSUBWrm : PDI<0xF9, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1438 "psubw {$src2, $dst|$dst, $src2}",
1439 [(set VR128:$dst, (sub VR128:$src1,
1440 (bc_v8i16 (loadv2i64 addr:$src2))))]>;
1441 def PSUBDrm : PDI<0xFA, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1442 "psubd {$src2, $dst|$dst, $src2}",
1443 [(set VR128:$dst, (sub VR128:$src1,
1444 (bc_v4i32 (loadv2i64 addr:$src2))))]>;
// PSUBQrm: packed quadword subtract, memory source (opcode 0xFB, pairs with
// PSUBQrr above).
// FIX: the asm string read "psubd" (dword subtract) — wrong mnemonic. The
// name (PSUBQrm), the 0xFB opcode shared with PSUBQrr, and the v2i64 sub
// pattern identify it as psubq, so the assembler text must say "psubq".
1445 def PSUBQrm : PDI<0xFB, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1446 "psubq {$src2, $dst|$dst, $src2}",
1447 [(set VR128:$dst, (sub VR128:$src1,
1448 (loadv2i64 addr:$src2)))]>;
1450 def PSUBSBrr : PDI<0xE8, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1451 "psubsb {$src2, $dst|$dst, $src2}",
1452 [(set VR128:$dst, (int_x86_sse2_psubs_b VR128:$src1,
1454 def PSUBSWrr : PDI<0xE9, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1455 "psubsw {$src2, $dst|$dst, $src2}",
1456 [(set VR128:$dst, (int_x86_sse2_psubs_w VR128:$src1,
1458 def PSUBUSBrr : PDI<0xD8, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1459 "psubusb {$src2, $dst|$dst, $src2}",
1460 [(set VR128:$dst, (int_x86_sse2_psubus_b VR128:$src1,
1462 def PSUBUSWrr : PDI<0xD9, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1463 "psubusw {$src2, $dst|$dst, $src2}",
1464 [(set VR128:$dst, (int_x86_sse2_psubus_w VR128:$src1,
1467 def PSUBSBrm : PDI<0xE8, MRMSrcMem,
1468 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1469 "psubsb {$src2, $dst|$dst, $src2}",
1470 [(set VR128:$dst, (int_x86_sse2_psubs_b VR128:$src1,
1471 (bc_v16i8 (loadv2i64 addr:$src2))))]>;
1472 def PSUBSWrm : PDI<0xE9, MRMSrcMem,
1473 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1474 "psubsw {$src2, $dst|$dst, $src2}",
1475 [(set VR128:$dst, (int_x86_sse2_psubs_w VR128:$src1,
1476 (bc_v8i16 (loadv2i64 addr:$src2))))]>;
1477 def PSUBUSBrm : PDI<0xD8, MRMSrcMem,
1478 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1479 "psubusb {$src2, $dst|$dst, $src2}",
1480 [(set VR128:$dst, (int_x86_sse2_psubus_b VR128:$src1,
1481 (bc_v16i8 (loadv2i64 addr:$src2))))]>;
1482 def PSUBUSWrm : PDI<0xD9, MRMSrcMem,
1483 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1484 "psubusw {$src2, $dst|$dst, $src2}",
1485 [(set VR128:$dst, (int_x86_sse2_psubus_w VR128:$src1,
1486 (bc_v8i16 (loadv2i64 addr:$src2))))]>;
1488 let isCommutable = 1 in {
1489 def PMULHUWrr : PDI<0xE4, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1490 "pmulhuw {$src2, $dst|$dst, $src2}",
1491 [(set VR128:$dst, (int_x86_sse2_pmulhu_w VR128:$src1,
1493 def PMULHWrr : PDI<0xE5, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1494 "pmulhw {$src2, $dst|$dst, $src2}",
1495 [(set VR128:$dst, (int_x86_sse2_pmulh_w VR128:$src1,
1497 def PMULLWrr : PDI<0xD5, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1498 "pmullw {$src2, $dst|$dst, $src2}",
1499 [(set VR128:$dst, (v8i16 (mul VR128:$src1, VR128:$src2)))]>;
1500 def PMULUDQrr : PDI<0xF4, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1501 "pmuludq {$src2, $dst|$dst, $src2}",
1502 [(set VR128:$dst, (int_x86_sse2_pmulu_dq VR128:$src1,
1505 def PMULHUWrm : PDI<0xE4, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1506 "pmulhuw {$src2, $dst|$dst, $src2}",
1507 [(set VR128:$dst, (int_x86_sse2_pmulhu_w VR128:$src1,
1508 (bc_v8i16 (loadv2i64 addr:$src2))))]>;
1509 def PMULHWrm : PDI<0xE5, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1510 "pmulhw {$src2, $dst|$dst, $src2}",
1511 [(set VR128:$dst, (int_x86_sse2_pmulh_w VR128:$src1,
1512 (bc_v8i16 (loadv2i64 addr:$src2))))]>;
1513 def PMULLWrm : PDI<0xD5, MRMSrcMem,
1514 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1515 "pmullw {$src2, $dst|$dst, $src2}",
1516 [(set VR128:$dst, (v8i16 (mul VR128:$src1,
1517 (bc_v8i16 (loadv2i64 addr:$src2)))))]>;
1518 def PMULUDQrm : PDI<0xF4, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1519 "pmuludq {$src2, $dst|$dst, $src2}",
1520 [(set VR128:$dst, (int_x86_sse2_pmulu_dq VR128:$src1,
1521 (bc_v4i32 (loadv2i64 addr:$src2))))]>;
1523 let isCommutable = 1 in {
1524 def PMADDWDrr : PDI<0xF5, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1525 "pmaddwd {$src2, $dst|$dst, $src2}",
1526 [(set VR128:$dst, (int_x86_sse2_pmadd_wd VR128:$src1,
1529 def PMADDWDrm : PDI<0xF5, MRMSrcMem,
1530 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1531 "pmaddwd {$src2, $dst|$dst, $src2}",
1532 [(set VR128:$dst, (int_x86_sse2_pmadd_wd VR128:$src1,
1533 (bc_v8i16 (loadv2i64 addr:$src2))))]>;
1535 let isCommutable = 1 in {
1536 def PAVGBrr : PDI<0xE0, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1537 "pavgb {$src2, $dst|$dst, $src2}",
1538 [(set VR128:$dst, (int_x86_sse2_pavg_b VR128:$src1,
1540 def PAVGWrr : PDI<0xE3, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1541 "pavgw {$src2, $dst|$dst, $src2}",
1542 [(set VR128:$dst, (int_x86_sse2_pavg_w VR128:$src1,
1545 def PAVGBrm : PDI<0xE0, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1546 "pavgb {$src2, $dst|$dst, $src2}",
1547 [(set VR128:$dst, (int_x86_sse2_pavg_b VR128:$src1,
1548 (bc_v16i8 (loadv2i64 addr:$src2))))]>;
1549 def PAVGWrm : PDI<0xE3, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1550 "pavgw {$src2, $dst|$dst, $src2}",
1551 [(set VR128:$dst, (int_x86_sse2_pavg_w VR128:$src1,
1552 (bc_v8i16 (loadv2i64 addr:$src2))))]>;
1554 let isCommutable = 1 in {
1555 def PMAXUBrr : PDI<0xDE, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1556 "pmaxub {$src2, $dst|$dst, $src2}",
1557 [(set VR128:$dst, (int_x86_sse2_pmaxu_b VR128:$src1,
1559 def PMAXSWrr : PDI<0xEE, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1560 "pmaxsw {$src2, $dst|$dst, $src2}",
1561 [(set VR128:$dst, (int_x86_sse2_pmaxs_w VR128:$src1,
1564 def PMAXUBrm : PDI<0xDE, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1565 "pmaxub {$src2, $dst|$dst, $src2}",
1566 [(set VR128:$dst, (int_x86_sse2_pmaxu_b VR128:$src1,
1567 (bc_v16i8 (loadv2i64 addr:$src2))))]>;
1568 def PMAXSWrm : PDI<0xEE, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1569 "pmaxsw {$src2, $dst|$dst, $src2}",
1570 [(set VR128:$dst, (int_x86_sse2_pmaxs_w VR128:$src1,
1571 (bc_v8i16 (loadv2i64 addr:$src2))))]>;
1573 let isCommutable = 1 in {
1574 def PMINUBrr : PDI<0xDA, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1575 "pminub {$src2, $dst|$dst, $src2}",
1576 [(set VR128:$dst, (int_x86_sse2_pminu_b VR128:$src1,
1578 def PMINSWrr : PDI<0xEA, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1579 "pminsw {$src2, $dst|$dst, $src2}",
1580 [(set VR128:$dst, (int_x86_sse2_pmins_w VR128:$src1,
1583 def PMINUBrm : PDI<0xDA, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1584 "pminub {$src2, $dst|$dst, $src2}",
1585 [(set VR128:$dst, (int_x86_sse2_pminu_b VR128:$src1,
1586 (bc_v16i8 (loadv2i64 addr:$src2))))]>;
1587 def PMINSWrm : PDI<0xEA, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1588 "pminsw {$src2, $dst|$dst, $src2}",
1589 [(set VR128:$dst, (int_x86_sse2_pmins_w VR128:$src1,
1590 (bc_v8i16 (loadv2i64 addr:$src2))))]>;
1593 let isCommutable = 1 in {
// PSADBW (sum of absolute byte differences) encodes as 66 0F F6 /r.
// The previous 0xE0 duplicated PAVGB's opcode (see PAVGBrr above).
1594 def PSADBWrr : PDI<0xF6, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1595 "psadbw {$src2, $dst|$dst, $src2}",
1596 [(set VR128:$dst, (int_x86_sse2_psad_bw VR128:$src1,
// Memory form of PSADBW: 66 0F F6 /r. The previous 0xE0 collided with
// PAVGBrm (0xE0, defined above).
1599 def PSADBWrm : PDI<0xF6, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1600 "psadbw {$src2, $dst|$dst, $src2}",
1601 [(set VR128:$dst, (int_x86_sse2_psad_bw VR128:$src1,
1602 (bc_v16i8 (loadv2i64 addr:$src2))))]>;
1605 let isTwoAddress = 1 in {
1606 def PSLLWrr : PDIi8<0xF1, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1607 "psllw {$src2, $dst|$dst, $src2}",
1608 [(set VR128:$dst, (int_x86_sse2_psll_w VR128:$src1,
1610 def PSLLWrm : PDIi8<0xF1, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1611 "psllw {$src2, $dst|$dst, $src2}",
1612 [(set VR128:$dst, (int_x86_sse2_psll_w VR128:$src1,
1613 (bc_v4i32 (loadv2i64 addr:$src2))))]>;
1614 def PSLLWri : PDIi8<0x71, MRM6r, (ops VR128:$dst, VR128:$src1, i32i8imm:$src2),
1615 "psllw {$src2, $dst|$dst, $src2}",
1616 [(set VR128:$dst, (int_x86_sse2_psll_w VR128:$src1,
1617 (scalar_to_vector (i32 imm:$src2))))]>;
1618 def PSLLDrr : PDIi8<0xF2, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1619 "pslld {$src2, $dst|$dst, $src2}",
1620 [(set VR128:$dst, (int_x86_sse2_psll_d VR128:$src1,
1622 def PSLLDrm : PDIi8<0xF2, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1623 "pslld {$src2, $dst|$dst, $src2}",
1624 [(set VR128:$dst, (int_x86_sse2_psll_d VR128:$src1,
1625 (bc_v4i32 (loadv2i64 addr:$src2))))]>;
1626 def PSLLDri : PDIi8<0x72, MRM6r, (ops VR128:$dst, VR128:$src1, i32i8imm:$src2),
1627 "pslld {$src2, $dst|$dst, $src2}",
1628 [(set VR128:$dst, (int_x86_sse2_psll_d VR128:$src1,
1629 (scalar_to_vector (i32 imm:$src2))))]>;
1630 def PSLLQrr : PDIi8<0xF3, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1631 "psllq {$src2, $dst|$dst, $src2}",
1632 [(set VR128:$dst, (int_x86_sse2_psll_q VR128:$src1,
1634 def PSLLQrm : PDIi8<0xF3, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1635 "psllq {$src2, $dst|$dst, $src2}",
1636 [(set VR128:$dst, (int_x86_sse2_psll_q VR128:$src1,
1637 (bc_v4i32 (loadv2i64 addr:$src2))))]>;
1638 def PSLLQri : PDIi8<0x73, MRM6r, (ops VR128:$dst, VR128:$src1, i32i8imm:$src2),
1639 "psllq {$src2, $dst|$dst, $src2}",
1640 [(set VR128:$dst, (int_x86_sse2_psll_q VR128:$src1,
1641 (scalar_to_vector (i32 imm:$src2))))]>;
1642 def PSLLDQri : PDIi8<0x73, MRM7r, (ops VR128:$dst, VR128:$src1, i32i8imm:$src2),
1643 "pslldq {$src2, $dst|$dst, $src2}", []>;
1645 def PSRLWrr : PDIi8<0xD1, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1646 "psrlw {$src2, $dst|$dst, $src2}",
1647 [(set VR128:$dst, (int_x86_sse2_psrl_w VR128:$src1,
1649 def PSRLWrm : PDIi8<0xD1, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1650 "psrlw {$src2, $dst|$dst, $src2}",
1651 [(set VR128:$dst, (int_x86_sse2_psrl_w VR128:$src1,
1652 (bc_v4i32 (loadv2i64 addr:$src2))))]>;
1653 def PSRLWri : PDIi8<0x71, MRM2r, (ops VR128:$dst, VR128:$src1, i32i8imm:$src2),
1654 "psrlw {$src2, $dst|$dst, $src2}",
1655 [(set VR128:$dst, (int_x86_sse2_psrl_w VR128:$src1,
1656 (scalar_to_vector (i32 imm:$src2))))]>;
1657 def PSRLDrr : PDIi8<0xD2, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1658 "psrld {$src2, $dst|$dst, $src2}",
1659 [(set VR128:$dst, (int_x86_sse2_psrl_d VR128:$src1,
1661 def PSRLDrm : PDIi8<0xD2, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1662 "psrld {$src2, $dst|$dst, $src2}",
1663 [(set VR128:$dst, (int_x86_sse2_psrl_d VR128:$src1,
1664 (bc_v4i32 (loadv2i64 addr:$src2))))]>;
1665 def PSRLDri : PDIi8<0x72, MRM2r, (ops VR128:$dst, VR128:$src1, i32i8imm:$src2),
1666 "psrld {$src2, $dst|$dst, $src2}",
1667 [(set VR128:$dst, (int_x86_sse2_psrl_d VR128:$src1,
1668 (scalar_to_vector (i32 imm:$src2))))]>;
1669 def PSRLQrr : PDIi8<0xD3, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1670 "psrlq {$src2, $dst|$dst, $src2}",
1671 [(set VR128:$dst, (int_x86_sse2_psrl_q VR128:$src1,
1673 def PSRLQrm : PDIi8<0xD3, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1674 "psrlq {$src2, $dst|$dst, $src2}",
1675 [(set VR128:$dst, (int_x86_sse2_psrl_q VR128:$src1,
1676 (bc_v4i32 (loadv2i64 addr:$src2))))]>;
1677 def PSRLQri : PDIi8<0x73, MRM2r, (ops VR128:$dst, VR128:$src1, i32i8imm:$src2),
1678 "psrlq {$src2, $dst|$dst, $src2}",
1679 [(set VR128:$dst, (int_x86_sse2_psrl_q VR128:$src1,
1680 (scalar_to_vector (i32 imm:$src2))))]>;
1681 def PSRLDQri : PDIi8<0x73, MRM3r, (ops VR128:$dst, VR128:$src1, i32i8imm:$src2),
1682 "psrldq {$src2, $dst|$dst, $src2}", []>;
1684 def PSRAWrr : PDIi8<0xE1, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1685 "psraw {$src2, $dst|$dst, $src2}",
1686 [(set VR128:$dst, (int_x86_sse2_psra_w VR128:$src1,
1688 def PSRAWrm : PDIi8<0xE1, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1689 "psraw {$src2, $dst|$dst, $src2}",
1690 [(set VR128:$dst, (int_x86_sse2_psra_w VR128:$src1,
1691 (bc_v4i32 (loadv2i64 addr:$src2))))]>;
1692 def PSRAWri : PDIi8<0x71, MRM4r, (ops VR128:$dst, VR128:$src1, i32i8imm:$src2),
1693 "psraw {$src2, $dst|$dst, $src2}",
1694 [(set VR128:$dst, (int_x86_sse2_psra_w VR128:$src1,
1695 (scalar_to_vector (i32 imm:$src2))))]>;
1696 def PSRADrr : PDIi8<0xE2, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1697 "psrad {$src2, $dst|$dst, $src2}",
1698 [(set VR128:$dst, (int_x86_sse2_psra_d VR128:$src1,
1700 def PSRADrm : PDIi8<0xE2, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1701 "psrad {$src2, $dst|$dst, $src2}",
1702 [(set VR128:$dst, (int_x86_sse2_psra_d VR128:$src1,
1703 (bc_v4i32 (loadv2i64 addr:$src2))))]>;
1704 def PSRADri : PDIi8<0x72, MRM4r, (ops VR128:$dst, VR128:$src1, i32i8imm:$src2),
1705 "psrad {$src2, $dst|$dst, $src2}",
1706 [(set VR128:$dst, (int_x86_sse2_psra_d VR128:$src1,
1707 (scalar_to_vector (i32 imm:$src2))))]>;
1711 let isTwoAddress = 1 in {
1712 let isCommutable = 1 in {
1713 def PANDrr : PDI<0xDB, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1714 "pand {$src2, $dst|$dst, $src2}",
1715 [(set VR128:$dst, (v2i64 (and VR128:$src1, VR128:$src2)))]>;
1716 def PORrr : PDI<0xEB, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1717 "por {$src2, $dst|$dst, $src2}",
1718 [(set VR128:$dst, (v2i64 (or VR128:$src1, VR128:$src2)))]>;
1719 def PXORrr : PDI<0xEF, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1720 "pxor {$src2, $dst|$dst, $src2}",
1721 [(set VR128:$dst, (v2i64 (xor VR128:$src1, VR128:$src2)))]>;
1724 def PANDrm : PDI<0xDB, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1725 "pand {$src2, $dst|$dst, $src2}",
1726 [(set VR128:$dst, (v2i64 (and VR128:$src1,
1727 (load addr:$src2))))]>;
1728 def PORrm : PDI<0xEB, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1729 "por {$src2, $dst|$dst, $src2}",
1730 [(set VR128:$dst, (v2i64 (or VR128:$src1,
1731 (load addr:$src2))))]>;
1732 def PXORrm : PDI<0xEF, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1733 "pxor {$src2, $dst|$dst, $src2}",
1734 [(set VR128:$dst, (v2i64 (xor VR128:$src1,
1735 (load addr:$src2))))]>;
1737 def PANDNrr : PDI<0xDF, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
1738 "pandn {$src2, $dst|$dst, $src2}",
1739 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
1742 def PANDNrm : PDI<0xDF, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1743 "pandn {$src2, $dst|$dst, $src2}",
1744 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
1745 (load addr:$src2))))]>;
1748 // SSE2 Integer comparison
1749 let isTwoAddress = 1 in {
1750 def PCMPEQBrr : PDI<0x74, MRMSrcReg,
1751 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1752 "pcmpeqb {$src2, $dst|$dst, $src2}",
1753 [(set VR128:$dst, (int_x86_sse2_pcmpeq_b VR128:$src1,
1755 def PCMPEQBrm : PDI<0x74, MRMSrcMem,
1756 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1757 "pcmpeqb {$src2, $dst|$dst, $src2}",
1758 [(set VR128:$dst, (int_x86_sse2_pcmpeq_b VR128:$src1,
1759 (bc_v16i8 (loadv2i64 addr:$src2))))]>;
1760 def PCMPEQWrr : PDI<0x75, MRMSrcReg,
1761 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1762 "pcmpeqw {$src2, $dst|$dst, $src2}",
1763 [(set VR128:$dst, (int_x86_sse2_pcmpeq_w VR128:$src1,
1765 def PCMPEQWrm : PDI<0x75, MRMSrcMem,
1766 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1767 "pcmpeqw {$src2, $dst|$dst, $src2}",
1768 [(set VR128:$dst, (int_x86_sse2_pcmpeq_w VR128:$src1,
1769 (bc_v8i16 (loadv2i64 addr:$src2))))]>;
1770 def PCMPEQDrr : PDI<0x76, MRMSrcReg,
1771 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1772 "pcmpeqd {$src2, $dst|$dst, $src2}",
1773 [(set VR128:$dst, (int_x86_sse2_pcmpeq_d VR128:$src1,
1775 def PCMPEQDrm : PDI<0x76, MRMSrcMem,
1776 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1777 "pcmpeqd {$src2, $dst|$dst, $src2}",
1778 [(set VR128:$dst, (int_x86_sse2_pcmpeq_d VR128:$src1,
1779 (bc_v4i32 (loadv2i64 addr:$src2))))]>;
1781 def PCMPGTBrr : PDI<0x64, MRMSrcReg,
1782 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1783 "pcmpgtb {$src2, $dst|$dst, $src2}",
1784 [(set VR128:$dst, (int_x86_sse2_pcmpgt_b VR128:$src1,
1786 def PCMPGTBrm : PDI<0x64, MRMSrcMem,
1787 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1788 "pcmpgtb {$src2, $dst|$dst, $src2}",
1789 [(set VR128:$dst, (int_x86_sse2_pcmpgt_b VR128:$src1,
1790 (bc_v16i8 (loadv2i64 addr:$src2))))]>;
1791 def PCMPGTWrr : PDI<0x65, MRMSrcReg,
1792 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1793 "pcmpgtw {$src2, $dst|$dst, $src2}",
1794 [(set VR128:$dst, (int_x86_sse2_pcmpgt_w VR128:$src1,
1796 def PCMPGTWrm : PDI<0x65, MRMSrcMem,
1797 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1798 "pcmpgtw {$src2, $dst|$dst, $src2}",
1799 [(set VR128:$dst, (int_x86_sse2_pcmpgt_w VR128:$src1,
1800 (bc_v8i16 (loadv2i64 addr:$src2))))]>;
1801 def PCMPGTDrr : PDI<0x66, MRMSrcReg,
1802 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1803 "pcmpgtd {$src2, $dst|$dst, $src2}",
1804 [(set VR128:$dst, (int_x86_sse2_pcmpgt_d VR128:$src1,
1806 def PCMPGTDrm : PDI<0x66, MRMSrcMem,
1807 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1808 "pcmpgtd {$src2, $dst|$dst, $src2}",
1809 [(set VR128:$dst, (int_x86_sse2_pcmpgt_d VR128:$src1,
1810 (bc_v4i32 (loadv2i64 addr:$src2))))]>;
1813 // Pack instructions
1814 let isTwoAddress = 1 in {
1815 def PACKSSWBrr : PDI<0x63, MRMSrcReg, (ops VR128:$dst, VR128:$src1,
1817 "packsswb {$src2, $dst|$dst, $src2}",
1818 [(set VR128:$dst, (v8i16 (int_x86_sse2_packsswb_128
// PACKSSWB (memory form): load the 128-bit operand as v2i64 and bitcast,
// matching PACKSSDWrm / PACKUSWBrm below. The previous loadv2f64 performed
// an FP-typed load of integer data.
1821 def PACKSSWBrm : PDI<0x63, MRMSrcMem, (ops VR128:$dst, VR128:$src1,
1823 "packsswb {$src2, $dst|$dst, $src2}",
1824 [(set VR128:$dst, (v8i16 (int_x86_sse2_packsswb_128
1826 (bc_v8i16 (loadv2i64 addr:$src2)))))]>;
1827 def PACKSSDWrr : PDI<0x6B, MRMSrcReg, (ops VR128:$dst, VR128:$src1,
1829 "packssdw {$src2, $dst|$dst, $src2}",
1830 [(set VR128:$dst, (v4i32 (int_x86_sse2_packssdw_128
1833 def PACKSSDWrm : PDI<0x6B, MRMSrcMem, (ops VR128:$dst, VR128:$src1,
1835 "packssdw {$src2, $dst|$dst, $src2}",
1836 [(set VR128:$dst, (v4i32 (int_x86_sse2_packssdw_128
1838 (bc_v4i32 (loadv2i64 addr:$src2)))))]>;
1839 def PACKUSWBrr : PDI<0x67, MRMSrcReg, (ops VR128:$dst, VR128:$src1,
1841 "packuswb {$src2, $dst|$dst, $src2}",
1842 [(set VR128:$dst, (v8i16 (int_x86_sse2_packuswb_128
1845 def PACKUSWBrm : PDI<0x67, MRMSrcMem, (ops VR128:$dst, VR128:$src1,
1847 "packuswb {$src2, $dst|$dst, $src2}",
1848 [(set VR128:$dst, (v8i16 (int_x86_sse2_packuswb_128
1850 (bc_v8i16 (loadv2i64 addr:$src2)))))]>;
1853 // Shuffle and unpack instructions
1854 def PSHUFDri : PDIi8<0x70, MRMSrcReg,
1855 (ops VR128:$dst, VR128:$src1, i8imm:$src2),
1856 "pshufd {$src2, $src1, $dst|$dst, $src1, $src2}",
1857 [(set VR128:$dst, (v4i32 (vector_shuffle
1858 VR128:$src1, (undef),
1859 PSHUFD_shuffle_mask:$src2)))]>;
1860 def PSHUFDmi : PDIi8<0x70, MRMSrcMem,
1861 (ops VR128:$dst, i128mem:$src1, i8imm:$src2),
1862 "pshufd {$src2, $src1, $dst|$dst, $src1, $src2}",
1863 [(set VR128:$dst, (v4i32 (vector_shuffle
1864 (bc_v4i32 (loadv2i64 addr:$src1)),
1866 PSHUFD_shuffle_mask:$src2)))]>;
1868 // SSE2 with ImmT == Imm8 and XS prefix.
1869 def PSHUFHWri : Ii8<0x70, MRMSrcReg,
1870 (ops VR128:$dst, VR128:$src1, i8imm:$src2),
1871 "pshufhw {$src2, $src1, $dst|$dst, $src1, $src2}",
1872 [(set VR128:$dst, (v8i16 (vector_shuffle
1873 VR128:$src1, (undef),
1874 PSHUFHW_shuffle_mask:$src2)))]>,
1875 XS, Requires<[HasSSE2]>;
1876 def PSHUFHWmi : Ii8<0x70, MRMSrcMem,
1877 (ops VR128:$dst, i128mem:$src1, i8imm:$src2),
1878 "pshufhw {$src2, $src1, $dst|$dst, $src1, $src2}",
1879 [(set VR128:$dst, (v8i16 (vector_shuffle
1880 (bc_v8i16 (loadv2i64 addr:$src1)),
1882 PSHUFHW_shuffle_mask:$src2)))]>,
1883 XS, Requires<[HasSSE2]>;
1885 // SSE2 with ImmT == Imm8 and XD prefix.
// PSHUFLW: shuffle the low four words using an 8-bit shuffle-mask immediate.
// Uses i8imm to match PSHUFDri and PSHUFHWri above (i32i8imm was
// inconsistent with the other shuffle-mask operands).
1886 def PSHUFLWri : Ii8<0x70, MRMSrcReg,
1887 (ops VR128:$dst, VR128:$src1, i8imm:$src2),
1888 "pshuflw {$src2, $src1, $dst|$dst, $src1, $src2}",
1889 [(set VR128:$dst, (v8i16 (vector_shuffle
1890 VR128:$src1, (undef),
1891 PSHUFLW_shuffle_mask:$src2)))]>,
1892 XD, Requires<[HasSSE2]>;
// Memory form of PSHUFLW; i8imm for the shuffle mask, matching PSHUFDmi and
// PSHUFHWmi above (i32i8imm was inconsistent).
1893 def PSHUFLWmi : Ii8<0x70, MRMSrcMem,
1894 (ops VR128:$dst, i128mem:$src1, i8imm:$src2),
1895 "pshuflw {$src2, $src1, $dst|$dst, $src1, $src2}",
1896 [(set VR128:$dst, (v8i16 (vector_shuffle
1897 (bc_v8i16 (loadv2i64 addr:$src1)),
1899 PSHUFLW_shuffle_mask:$src2)))]>,
1900 XD, Requires<[HasSSE2]>;
1902 let isTwoAddress = 1 in {
1903 def PUNPCKLBWrr : PDI<0x60, MRMSrcReg,
1904 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1905 "punpcklbw {$src2, $dst|$dst, $src2}",
1907 (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
1908 UNPCKL_shuffle_mask)))]>;
1909 def PUNPCKLBWrm : PDI<0x60, MRMSrcMem,
1910 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1911 "punpcklbw {$src2, $dst|$dst, $src2}",
1913 (v16i8 (vector_shuffle VR128:$src1,
1914 (bc_v16i8 (loadv2i64 addr:$src2)),
1915 UNPCKL_shuffle_mask)))]>;
1916 def PUNPCKLWDrr : PDI<0x61, MRMSrcReg,
1917 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1918 "punpcklwd {$src2, $dst|$dst, $src2}",
1920 (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
1921 UNPCKL_shuffle_mask)))]>;
1922 def PUNPCKLWDrm : PDI<0x61, MRMSrcMem,
1923 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1924 "punpcklwd {$src2, $dst|$dst, $src2}",
1926 (v8i16 (vector_shuffle VR128:$src1,
1927 (bc_v8i16 (loadv2i64 addr:$src2)),
1928 UNPCKL_shuffle_mask)))]>;
1929 def PUNPCKLDQrr : PDI<0x62, MRMSrcReg,
1930 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1931 "punpckldq {$src2, $dst|$dst, $src2}",
1933 (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
1934 UNPCKL_shuffle_mask)))]>;
1935 def PUNPCKLDQrm : PDI<0x62, MRMSrcMem,
1936 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1937 "punpckldq {$src2, $dst|$dst, $src2}",
1939 (v4i32 (vector_shuffle VR128:$src1,
1940 (bc_v4i32 (loadv2i64 addr:$src2)),
1941 UNPCKL_shuffle_mask)))]>;
1942 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
1943 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1944 "punpcklqdq {$src2, $dst|$dst, $src2}",
1946 (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
1947 UNPCKL_shuffle_mask)))]>;
1948 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
1949 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1950 "punpcklqdq {$src2, $dst|$dst, $src2}",
1952 (v2i64 (vector_shuffle VR128:$src1,
1953 (loadv2i64 addr:$src2),
1954 UNPCKL_shuffle_mask)))]>;
1956 def PUNPCKHBWrr : PDI<0x68, MRMSrcReg,
1957 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1958 "punpckhbw {$src2, $dst|$dst, $src2}",
1960 (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
1961 UNPCKH_shuffle_mask)))]>;
1962 def PUNPCKHBWrm : PDI<0x68, MRMSrcMem,
1963 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1964 "punpckhbw {$src2, $dst|$dst, $src2}",
1966 (v16i8 (vector_shuffle VR128:$src1,
1967 (bc_v16i8 (loadv2i64 addr:$src2)),
1968 UNPCKH_shuffle_mask)))]>;
1969 def PUNPCKHWDrr : PDI<0x69, MRMSrcReg,
1970 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1971 "punpckhwd {$src2, $dst|$dst, $src2}",
1973 (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
1974 UNPCKH_shuffle_mask)))]>;
1975 def PUNPCKHWDrm : PDI<0x69, MRMSrcMem,
1976 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1977 "punpckhwd {$src2, $dst|$dst, $src2}",
1979 (v8i16 (vector_shuffle VR128:$src1,
1980 (bc_v8i16 (loadv2i64 addr:$src2)),
1981 UNPCKH_shuffle_mask)))]>;
1982 def PUNPCKHDQrr : PDI<0x6A, MRMSrcReg,
1983 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1984 "punpckhdq {$src2, $dst|$dst, $src2}",
1986 (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
1987 UNPCKH_shuffle_mask)))]>;
1988 def PUNPCKHDQrm : PDI<0x6A, MRMSrcMem,
1989 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
1990 "punpckhdq {$src2, $dst|$dst, $src2}",
1992 (v4i32 (vector_shuffle VR128:$src1,
1993 (bc_v4i32 (loadv2i64 addr:$src2)),
1994 UNPCKH_shuffle_mask)))]>;
1995 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
1996 (ops VR128:$dst, VR128:$src1, VR128:$src2),
1997 "punpckhqdq {$src2, $dst|$dst, $src2}",
1999 (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
2000 UNPCKH_shuffle_mask)))]>;
2001 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2002 (ops VR128:$dst, VR128:$src1, i128mem:$src2),
2003 "punpckhqdq {$src2, $dst|$dst, $src2}",
2005 (v2i64 (vector_shuffle VR128:$src1,
2006 (loadv2i64 addr:$src2),
2007 UNPCKH_shuffle_mask)))]>;
// Extract word: copy the word of the v8i16 source selected by the $src2
// immediate into a GR32 (lowered via the X86pextrw node defined at the top
// of this file).
2011 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2012 (ops GR32:$dst, VR128:$src1, i32i8imm:$src2),
2013 "pextrw {$src2, $src1, $dst|$dst, $src1, $src2}",
2014 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2015 (i32 imm:$src2)))]>;
2016 let isTwoAddress = 1 in {
2017 def PINSRWrri : PDIi8<0xC4, MRMSrcReg,
2018 (ops VR128:$dst, VR128:$src1, GR32:$src2, i32i8imm:$src3),
2019 "pinsrw {$src3, $src2, $dst|$dst, $src2, $src3}",
2020 [(set VR128:$dst, (v8i16 (X86pinsrw (v8i16 VR128:$src1),
2021 GR32:$src2, (iPTR imm:$src3))))]>;
2022 def PINSRWrmi : PDIi8<0xC4, MRMSrcMem,
2023 (ops VR128:$dst, VR128:$src1, i16mem:$src2, i32i8imm:$src3),
2024 "pinsrw {$src3, $src2, $dst|$dst, $src2, $src3}",
2026 (v8i16 (X86pinsrw (v8i16 VR128:$src1),
2027 (i32 (anyext (loadi16 addr:$src2))),
2028 (iPTR imm:$src3))))]>;
2031 //===----------------------------------------------------------------------===//
2032 // Miscellaneous Instructions
2033 //===----------------------------------------------------------------------===//
2036 def MOVMSKPSrr : PSI<0x50, MRMSrcReg, (ops GR32:$dst, VR128:$src),
2037 "movmskps {$src, $dst|$dst, $src}",
2038 [(set GR32:$dst, (int_x86_sse_movmsk_ps VR128:$src))]>;
2039 def MOVMSKPDrr : PSI<0x50, MRMSrcReg, (ops GR32:$dst, VR128:$src),
2040 "movmskpd {$src, $dst|$dst, $src}",
2041 [(set GR32:$dst, (int_x86_sse2_movmsk_pd VR128:$src))]>;
2043 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (ops GR32:$dst, VR128:$src),
2044 "pmovmskb {$src, $dst|$dst, $src}",
2045 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2047 // Conditional store
// MASKMOVDQU: byte-masked store of $src, selected by $mask, to the address
// implicitly held in EDI (expressed via the EDI operand in the pattern).
// NOTE(review): RawFrm provides no ModRM operand encoding, yet this
// instruction takes two XMM register operands (0F F7 /r in the SDM) — this
// looks like it should be MRMSrcReg; verify against the encoding tables.
2048 def MASKMOVDQU : PDI<0xF7, RawFrm, (ops VR128:$src, VR128:$mask),
2049 "maskmovdqu {$mask, $src|$src, $mask}",
2050 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>,
2053 // Prefetching loads
2054 def PREFETCHT0 : PSI<0x18, MRM1m, (ops i8mem:$src),
2055 "prefetcht0 $src", []>;
2056 def PREFETCHT1 : PSI<0x18, MRM2m, (ops i8mem:$src),
2057 "prefetcht1 $src", []>;
2058 def PREFETCHT2 : PSI<0x18, MRM3m, (ops i8mem:$src),
2059 "prefetcht2 $src", []>;
2060 def PREFETCHTNTA : PSI<0x18, MRM0m, (ops i8mem:$src),
2061 "prefetchtnta $src", []>;
2063 // Non-temporal stores
// These bypass the caches; they are only selected through the explicit
// movnt intrinsics, never for ordinary stores.
2064 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (ops i128mem:$dst, VR128:$src),
2065 "movntps {$src, $dst|$dst, $src}",
2066 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
2067 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (ops i128mem:$dst, VR128:$src),
2068 "movntpd {$src, $dst|$dst, $src}",
2069 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
// NOTE(review): the memory operand classes look swapped relative to the data
// types — the FP stores above use i128mem while the integer store below uses
// f128mem. Size and encoding are identical either way, so this is cosmetic,
// but confirm the intended convention.
2070 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (ops f128mem:$dst, VR128:$src),
2071 "movntdq {$src, $dst|$dst, $src}",
2072 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
// MOVNTI: non-temporal store of a 32-bit GPR (plain I class + TB prefix,
// since it is not an SSE-packed instruction).
2073 def MOVNTImr : I<0xC3, MRMDestMem, (ops i32mem:$dst, GR32:$src),
2074 "movnti {$src, $dst|$dst, $src}",
2075 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
2076 TB, Requires<[HasSSE2]>;
2079 def CLFLUSH : I<0xAE, MRM7m, (ops i8mem:$src),
2080 "clflush $src", [(int_x86_sse2_clflush addr:$src)]>,
2081 TB, Requires<[HasSSE2]>;
2083 // Load, store, and memory fence
2084 def SFENCE : I<0xAE, MRM7m, (ops),
2085 "sfence", [(int_x86_sse_sfence)]>, TB, Requires<[HasSSE1]>;
2086 def LFENCE : I<0xAE, MRM5m, (ops),
2087 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
2088 def MFENCE : I<0xAE, MRM6m, (ops),
2089 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
// Load MXCSR from memory. Intel SDM encodes LDMXCSR as 0F AE /2, i.e.
// MRM2m; the previous MRM5m (/5) collided with LFENCE above, which also
// uses opcode 0xAE with /5.
2092 def LDMXCSR : I<0xAE, MRM2m, (ops i32mem:$src),
2094 [(int_x86_sse_ldmxcsr addr:$src)]>, TB, Requires<[HasSSE1]>;
// Store MXCSR to memory: 0F AE /3 (matches the SDM).
2095 def STMXCSR : I<0xAE, MRM3m, (ops i32mem:$dst),
2097 [(int_x86_sse_stmxcsr addr:$dst)]>, TB, Requires<[HasSSE1]>;
2099 // Thread synchronization
2100 def MONITOR : I<0xC8, RawFrm, (ops), "monitor",
2101 [(int_x86_sse3_monitor EAX, ECX, EDX)]>,
2102 TB, Requires<[HasSSE3]>;
2103 def MWAIT : I<0xC9, RawFrm, (ops), "mwait",
2104 [(int_x86_sse3_mwait ECX, EAX)]>,
2105 TB, Requires<[HasSSE3]>;
2107 //===----------------------------------------------------------------------===//
2108 // Alias Instructions
2109 //===----------------------------------------------------------------------===//
2111 // Alias instructions that map zero vector to pxor / xorp* for sse.
2112 // FIXME: remove when we can teach regalloc that xor reg, reg is ok.
2113 def V_SET0 : PSI<0x57, MRMInitReg, (ops VR128:$dst),
2115 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
// All-ones vector materialized via pcmpeqd reg, reg.
// NOTE(review): the pattern types the all-ones vector as v2f64 even though
// pcmpeqd produces integer lanes — presumably this relies on bitcast
// folding; confirm immAllOnesV is expected to match at an FP vector type.
2117 def V_SETALLONES : PDI<0x76, MRMInitReg, (ops VR128:$dst),
2118 "pcmpeqd $dst, $dst",
2119 [(set VR128:$dst, (v2f64 immAllOnesV))]>;
2121 // FR32 / FR64 to 128-bit vector conversion.
2122 def MOVSS2PSrr : SSI<0x10, MRMSrcReg, (ops VR128:$dst, FR32:$src),
2123 "movss {$src, $dst|$dst, $src}",
2125 (v4f32 (scalar_to_vector FR32:$src)))]>;
2126 def MOVSS2PSrm : SSI<0x10, MRMSrcMem, (ops VR128:$dst, f32mem:$src),
2127 "movss {$src, $dst|$dst, $src}",
2129 (v4f32 (scalar_to_vector (loadf32 addr:$src))))]>;
2130 def MOVSD2PDrr : SDI<0x10, MRMSrcReg, (ops VR128:$dst, FR64:$src),
2131 "movsd {$src, $dst|$dst, $src}",
2133 (v2f64 (scalar_to_vector FR64:$src)))]>;
2134 def MOVSD2PDrm : SDI<0x10, MRMSrcMem, (ops VR128:$dst, f64mem:$src),
2135 "movsd {$src, $dst|$dst, $src}",
2137 (v2f64 (scalar_to_vector (loadf64 addr:$src))))]>;
2139 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (ops VR128:$dst, GR32:$src),
2140 "movd {$src, $dst|$dst, $src}",
2142 (v4i32 (scalar_to_vector GR32:$src)))]>;
2143 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (ops VR128:$dst, i32mem:$src),
2144 "movd {$src, $dst|$dst, $src}",
2146 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2147 // SSE2 instructions with XS prefix
2148 def MOVQI2PQIrr : I<0x7E, MRMSrcReg, (ops VR128:$dst, VR64:$src),
2149 "movq {$src, $dst|$dst, $src}",
2151 (v2i64 (scalar_to_vector VR64:$src)))]>, XS,
2152 Requires<[HasSSE2]>;
2153 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (ops VR128:$dst, i64mem:$src),
2154 "movq {$src, $dst|$dst, $src}",
2156 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
2157 Requires<[HasSSE2]>;
2158 // FIXME: may not be able to eliminate this movss with coalescing the src and
2159 // dest register classes are different. We really want to write this pattern
2161 // def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2162 // (f32 FR32:$src)>;
2163 def MOVPS2SSrr : SSI<0x10, MRMSrcReg, (ops FR32:$dst, VR128:$src),
2164 "movss {$src, $dst|$dst, $src}",
2165 [(set FR32:$dst, (vector_extract (v4f32 VR128:$src),
2167 def MOVPS2SSmr : SSI<0x11, MRMDestMem, (ops f32mem:$dst, VR128:$src),
2168 "movss {$src, $dst|$dst, $src}",
2169 [(store (f32 (vector_extract (v4f32 VR128:$src),
2170 (iPTR 0))), addr:$dst)]>;
2171 def MOVPD2SDrr : SDI<0x10, MRMSrcReg, (ops FR64:$dst, VR128:$src),
2172 "movsd {$src, $dst|$dst, $src}",
2173 [(set FR64:$dst, (vector_extract (v2f64 VR128:$src),
2175 def MOVPD2SDmr : SDI<0x11, MRMDestMem, (ops f64mem:$dst, VR128:$src),
2176 "movsd {$src, $dst|$dst, $src}",
2177 [(store (f64 (vector_extract (v2f64 VR128:$src),
2178 (iPTR 0))), addr:$dst)]>;
2179 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (ops GR32:$dst, VR128:$src),
2180 "movd {$src, $dst|$dst, $src}",
2181 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2183 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (ops i32mem:$dst, VR128:$src),
2184 "movd {$src, $dst|$dst, $src}",
2185 [(store (i32 (vector_extract (v4i32 VR128:$src),
2186 (iPTR 0))), addr:$dst)]>;
2188 // Move to lower bits of a VR128, leaving upper bits alone.
2189 // Three operand (but two address) aliases.
2190 let isTwoAddress = 1 in {
2191 def MOVLSS2PSrr : SSI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src1, FR32:$src2),
2192 "movss {$src2, $dst|$dst, $src2}", []>;
2193 def MOVLSD2PDrr : SDI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src1, FR64:$src2),
2194 "movsd {$src2, $dst|$dst, $src2}", []>;
2196 let AddedComplexity = 20 in {
2197 def MOVLPSrr : SSI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
2198 "movss {$src2, $dst|$dst, $src2}",
2200 (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
2201 MOVL_shuffle_mask)))]>;
2202 def MOVLPDrr : SDI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
2203 "movsd {$src2, $dst|$dst, $src2}",
2205 (v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
2206 MOVL_shuffle_mask)))]>;
2210 // Store / copy lower 64-bits of a XMM register.
2211 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (ops i64mem:$dst, VR128:$src),
2212 "movq {$src, $dst|$dst, $src}",
2213 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
2215 // Move to lower bits of a VR128 and zeroing upper bits.
2216 // Loading from memory automatically zeroing upper bits.
2217 let AddedComplexity = 20 in {
// movss/movsd load forms: the scalar load lands in element 0, and shuffling it
// with the all-zeros vector models the hardware's zeroing of upper elements.
2218 def MOVZSS2PSrm : SSI<0x10, MRMSrcMem, (ops VR128:$dst, f32mem:$src),
2219 "movss {$src, $dst|$dst, $src}",
2220 [(set VR128:$dst, (v4f32 (vector_shuffle immAllZerosV,
2221 (v4f32 (scalar_to_vector (loadf32 addr:$src))),
2222 MOVL_shuffle_mask)))]>;
2223 def MOVZSD2PDrm : SDI<0x10, MRMSrcMem, (ops VR128:$dst, f64mem:$src),
2224 "movsd {$src, $dst|$dst, $src}",
2225 [(set VR128:$dst, (v2f64 (vector_shuffle immAllZerosV,
2226 (v2f64 (scalar_to_vector (loadf64 addr:$src))),
2227 MOVL_shuffle_mask)))]>;
2228 // movd / movq to XMM register zero-extends
// movd GR32 -> XMM (reg and mem source): value goes to element 0, elements
// 1..3 are zeroed, expressed as a MOVL shuffle with the all-zeros vector.
2229 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (ops VR128:$dst, GR32:$src),
2230 "movd {$src, $dst|$dst, $src}",
2231 [(set VR128:$dst, (v4i32 (vector_shuffle immAllZerosV,
2232 (v4i32 (scalar_to_vector GR32:$src)),
2233 MOVL_shuffle_mask)))]>;
2234 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (ops VR128:$dst, i32mem:$src),
2235 "movd {$src, $dst|$dst, $src}",
2236 [(set VR128:$dst, (v4i32 (vector_shuffle immAllZerosV,
2237 (v4i32 (scalar_to_vector (loadi32 addr:$src))),
2238 MOVL_shuffle_mask)))]>;
2239 // Moving from XMM to XMM but still clear upper 64 bits.
// Matched via the movl_dq intrinsic; the XS prefix selects the movq encoding
// (presumably F3 0F 7E — confirm against the encoding tables).
2240 def MOVZQI2PQIrr : I<0x7E, MRMSrcReg, (ops VR128:$dst, VR128:$src),
2241 "movq {$src, $dst|$dst, $src}",
2242 [(set VR128:$dst, (int_x86_sse2_movl_dq VR128:$src))]>,
2243 XS, Requires<[HasSSE2]>;
// Memory form: the v2i64 load is bitcast to v4i32 to feed the intrinsic.
2244 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (ops VR128:$dst, i64mem:$src),
2245 "movq {$src, $dst|$dst, $src}",
2246 [(set VR128:$dst, (int_x86_sse2_movl_dq
2247 (bc_v4i32 (loadv2i64 addr:$src))))]>,
2248 XS, Requires<[HasSSE2]>;
2251 //===----------------------------------------------------------------------===//
2252 // Non-Instruction Patterns
2253 //===----------------------------------------------------------------------===//
2255 // 128-bit vector undef's.
// An undef vector needs no code: select IMPLICIT_DEF of a VR128.
2256 def : Pat<(v2f64 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
2257 def : Pat<(v16i8 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
2258 def : Pat<(v8i16 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
2259 def : Pat<(v4i32 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
2260 def : Pat<(v2i64 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
2262 // 128-bit vector all zero's.
// Materialized with the V_SET0 pseudo (self-xor idiom defined earlier in the
// file); one pattern per legal 128-bit type.
2263 def : Pat<(v16i8 immAllZerosV), (V_SET0)>, Requires<[HasSSE2]>;
2264 def : Pat<(v8i16 immAllZerosV), (V_SET0)>, Requires<[HasSSE2]>;
2265 def : Pat<(v4i32 immAllZerosV), (V_SET0)>, Requires<[HasSSE2]>;
2266 def : Pat<(v2i64 immAllZerosV), (V_SET0)>, Requires<[HasSSE2]>;
2267 def : Pat<(v2f64 immAllZerosV), (V_SET0)>, Requires<[HasSSE2]>;
2269 // 128-bit vector all one's.
// Materialized with the V_SETALLONES pseudo (pcmpeqd-style self-compare
// idiom, an SSE2 instruction). All entries are therefore gated on HasSSE2;
// the v4f32 entry previously required only HasSSE1, which could have
// materialized an SSE2 opcode on an SSE1-only target.
2270 def : Pat<(v16i8 immAllOnesV), (V_SETALLONES)>, Requires<[HasSSE2]>;
2271 def : Pat<(v8i16 immAllOnesV), (V_SETALLONES)>, Requires<[HasSSE2]>;
2272 def : Pat<(v4i32 immAllOnesV), (V_SETALLONES)>, Requires<[HasSSE2]>;
2273 def : Pat<(v2i64 immAllOnesV), (V_SETALLONES)>, Requires<[HasSSE2]>;
2274 def : Pat<(v4f32 immAllOnesV), (V_SETALLONES)>, Requires<[HasSSE2]>;
2276 // Store 128-bit integer vector values.
// Selected as movdqa stores. NOTE(review): this is the aligned form;
// presumably unaligned stores are routed elsewhere before isel — confirm.
2277 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
2278 (MOVDQAmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
2279 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
2280 (MOVDQAmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
2281 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
2282 (MOVDQAmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
2284 // Scalar to v8i16 / v16i8. The source may be a GR32, but only the lower 8 or
// 16 bits matter, so a plain movd of the full GR32 is sufficient.
2286 def : Pat<(v8i16 (X86s2vec GR32:$src)), (MOVDI2PDIrr GR32:$src)>,
2287 Requires<[HasSSE2]>;
2288 def : Pat<(v16i8 (X86s2vec GR32:$src)), (MOVDI2PDIrr GR32:$src)>,
2289 Requires<[HasSSE2]>;
// Bitconverts between 128-bit vector types are no-ops: source and destination
// live in the same register class (VR128), so the pattern result is just the
// source register. Gated on SSE2 because the integer vector types are only
// legal with SSE2. One pattern per (dst type, src type) pair.
2292 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>,
2293 Requires<[HasSSE2]>;
2294 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>,
2295 Requires<[HasSSE2]>;
2296 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>,
2297 Requires<[HasSSE2]>;
2298 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>,
2299 Requires<[HasSSE2]>;
2300 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>,
2301 Requires<[HasSSE2]>;
2302 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>,
2303 Requires<[HasSSE2]>;
2304 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>,
2305 Requires<[HasSSE2]>;
2306 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>,
2307 Requires<[HasSSE2]>;
2308 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>,
2309 Requires<[HasSSE2]>;
2310 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>,
2311 Requires<[HasSSE2]>;
2312 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>,
2313 Requires<[HasSSE2]>;
2314 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>,
2315 Requires<[HasSSE2]>;
2316 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>,
2317 Requires<[HasSSE2]>;
2318 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>,
2319 Requires<[HasSSE2]>;
2320 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>,
2321 Requires<[HasSSE2]>;
2322 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>,
2323 Requires<[HasSSE2]>;
2324 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>,
2325 Requires<[HasSSE2]>;
2326 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>,
2327 Requires<[HasSSE2]>;
2328 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>,
2329 Requires<[HasSSE2]>;
2330 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>,
2331 Requires<[HasSSE2]>;
2332 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>,
2333 Requires<[HasSSE2]>;
2334 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>,
2335 Requires<[HasSSE2]>;
2336 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>,
2337 Requires<[HasSSE2]>;
2338 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>,
2339 Requires<[HasSSE2]>;
2340 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>,
2341 Requires<[HasSSE2]>;
2342 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>,
2343 Requires<[HasSSE2]>;
2344 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>,
2345 Requires<[HasSSE2]>;
2346 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>,
2347 Requires<[HasSSE2]>;
2348 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>,
2349 Requires<[HasSSE2]>;
2350 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>,
2351 Requires<[HasSSE2]>;
2353 // Move scalar to XMM zero-extended
2354 // movd to XMM register zero-extends
2355 let AddedComplexity = 20 in {
// v8i16/v16i8 zero-extending inserts reuse the movd zero-extend (MOVZDI2PDIrr)
// since only the low bits of the GR32 matter.
2356 def : Pat<(v8i16 (vector_shuffle immAllZerosV,
2357 (v8i16 (X86s2vec GR32:$src)), MOVL_shuffle_mask)),
2358 (MOVZDI2PDIrr GR32:$src)>, Requires<[HasSSE2]>;
2359 def : Pat<(v16i8 (vector_shuffle immAllZerosV,
2360 (v16i8 (X86s2vec GR32:$src)), MOVL_shuffle_mask)),
2361 (MOVZDI2PDIrr GR32:$src)>, Requires<[HasSSE2]>;
2362 // Zeroing a VR128 then do a MOVS{S|D} to the lower bits.
2363 def : Pat<(v2f64 (vector_shuffle immAllZerosV,
2364 (v2f64 (scalar_to_vector FR64:$src)), MOVL_shuffle_mask)),
2365 (MOVLSD2PDrr (V_SET0), FR64:$src)>, Requires<[HasSSE2]>;
// NOTE(review): movss is an SSE1 instruction, so HasSSE2 on the v4f32 pattern
// below looks stricter than necessary — confirm V_SET0's own requirements
// before relaxing it to HasSSE1.
2366 def : Pat<(v4f32 (vector_shuffle immAllZerosV,
2367 (v4f32 (scalar_to_vector FR32:$src)), MOVL_shuffle_mask)),
2368 (MOVLSS2PSrr (V_SET0), FR32:$src)>, Requires<[HasSSE2]>;
2371 // Splat v2f64 / v2i64
// Two-element splats are an unpack-low of the register with itself.
2372 let AddedComplexity = 10 in {
2373 def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), SSE_splat_v2_mask:$sm),
2374 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2375 def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), SSE_splat_v2_mask:$sm),
2376 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
// Four-element f32 splat: shufps of the register with itself, reusing the
// splat mask as the immediate.
2380 def : Pat<(vector_shuffle (v4f32 VR128:$src), (undef), SSE_splat_mask:$sm),
2381 (SHUFPSrri VR128:$src, VR128:$src, SSE_splat_mask:$sm)>,
2382 Requires<[HasSSE1]>;
2384 // Special unary SHUFPSrri case.
2385 // FIXME: when we want non two-address code, then we should use PSHUFD?
2386 def : Pat<(vector_shuffle (v4f32 VR128:$src1), (undef),
2387 SHUFP_unary_shuffle_mask:$sm),
2388 (SHUFPSrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
2389 Requires<[HasSSE1]>;
2390 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
// PSHUFD takes its source from memory, so the load folds into the shuffle.
2391 def : Pat<(vector_shuffle (loadv4f32 addr:$src1), (undef),
2392 SHUFP_unary_shuffle_mask:$sm),
2393 (PSHUFDmi addr:$src1, SHUFP_unary_shuffle_mask:$sm)>,
2394 Requires<[HasSSE2]>;
2395 // Special binary v4i32 shuffle cases with SHUFPS.
// Integer shuffles with a PSHUFD-compatible mask can use the FP shufps
// encoding; the second pattern folds a (bitcast) v2i64 load as the RHS.
2396 def : Pat<(vector_shuffle (v4i32 VR128:$src1), (v4i32 VR128:$src2),
2397 PSHUFD_binary_shuffle_mask:$sm),
2398 (SHUFPSrri VR128:$src1, VR128:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
2399 Requires<[HasSSE2]>;
2400 def : Pat<(vector_shuffle (v4i32 VR128:$src1),
2401 (bc_v4i32 (loadv2i64 addr:$src2)), PSHUFD_binary_shuffle_mask:$sm),
2402 (SHUFPSrmi VR128:$src1, addr:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
2403 Requires<[HasSSE2]>;
2405 // vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
2406 let AddedComplexity = 10 in {
// Self-unpack of the register implements the <0,0,1,1,...> shuffle.
// Predicate fix: UNPCKLPS is an SSE1 instruction while PUNPCKLDQ is SSE2;
// the two predicates were swapped (the PS pattern demanded SSE2 and the DQ
// pattern only SSE1, which could emit an SSE2 opcode on an SSE1-only target).
// PUNPCKLBW/PUNPCKLWD were already correctly gated on HasSSE2.
2407 def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
2408 UNPCKL_v_undef_shuffle_mask)),
2409 (UNPCKLPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
2410 def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
2411 UNPCKL_v_undef_shuffle_mask)),
2412 (PUNPCKLBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2413 def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
2414 UNPCKL_v_undef_shuffle_mask)),
2415 (PUNPCKLWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2416 def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2417 UNPCKL_v_undef_shuffle_mask)),
2418 (PUNPCKLDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
// SSE3 duplicate-element shuffles (movshdup / movsldup), register and
// load-folded forms; loads arrive as v2i64 and are bitcast to v4i32.
2421 let AddedComplexity = 20 in {
2422 // vector_shuffle v1, <undef> <1, 1, 3, 3>
2423 def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2424 MOVSHDUP_shuffle_mask)),
2425 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
2426 def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (loadv2i64 addr:$src)), (undef),
2427 MOVSHDUP_shuffle_mask)),
2428 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
2430 // vector_shuffle v1, <undef> <0, 0, 2, 2>
2431 def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2432 MOVSLDUP_shuffle_mask)),
2433 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
2434 def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (loadv2i64 addr:$src)), (undef),
2435 MOVSLDUP_shuffle_mask)),
2436 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
// Half-register moves between two VR128s.
// NOTE(review): no Requires<> predicates on these patterns; the v4i32 forms
// presumably rely on that type only being legal with SSE2 — confirm.
2439 let AddedComplexity = 20 in {
2440 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
2441 def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2442 MOVHP_shuffle_mask)),
2443 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
2445 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
2446 def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2447 MOVHLPS_shuffle_mask)),
2448 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
2450 // vector_shuffle v1, undef <2, 3, ?, ?> using MOVHLPS
// Unary form: move the high half of $src1 into the low half of itself.
2451 def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
2452 UNPCKH_shuffle_mask)),
2453 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
2454 def : Pat<(v4i32 (vector_shuffle VR128:$src1, (undef),
2455 UNPCKH_shuffle_mask)),
2456 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
2458 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
2459 // vector_shuffle v1, (load v2) <0, 1, 4, 5> using MOVHPS
// FP forms: merge a 64-bit memory half into the low (MOVLP) or high (MOVHP)
// half of $src1, folding the load into the instruction.
2460 def : Pat<(v4f32 (vector_shuffle VR128:$src1, (loadv4f32 addr:$src2),
2461 MOVLP_shuffle_mask)),
2462 (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
2463 def : Pat<(v2f64 (vector_shuffle VR128:$src1, (loadv2f64 addr:$src2),
2464 MOVLP_shuffle_mask)),
2465 (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
2466 def : Pat<(v4f32 (vector_shuffle VR128:$src1, (loadv4f32 addr:$src2),
2467 MOVHP_shuffle_mask)),
2468 (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
2469 def : Pat<(v2f64 (vector_shuffle VR128:$src1, (loadv2f64 addr:$src2),
2470 MOVHP_shuffle_mask)),
2471 (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
// Integer variants of the MOVLP load-merge patterns above; v4i32 loads arrive
// as v2i64 and are bitcast.
2473 def : Pat<(v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (loadv2i64 addr:$src2)),
2474 MOVLP_shuffle_mask)),
2475 (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
2476 def : Pat<(v2i64 (vector_shuffle VR128:$src1, (loadv2i64 addr:$src2),
2477 MOVLP_shuffle_mask)),
2478 (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
// vector_shuffle v1, (load v2) <0, 1, 4, 5> using MOVHPS, integer variant.
// Predicate fix: the loadv2i64/bc_v4i32 operands only arise when the integer
// vector types are legal, i.e. with SSE2 — made consistent with the v4i32
// MOVLP pattern above, which already requires HasSSE2 (this one said HasSSE1).
2479 def : Pat<(v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (loadv2i64 addr:$src2)),
2480 MOVHP_shuffle_mask)),
2481 (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
// vector_shuffle v1, (load v2) <0, 1, 4, 5> using MOVHPD, v2i64 variant.
// Bug fix: this pattern was a byte-for-byte duplicate of the v2i64 MOVLP
// pattern above (MOVL_shuffle_mask -> MOVLPDrm). Positioned after the v4i32
// MOVHP pattern, it is clearly meant to be the MOVHP counterpart.
2482 def : Pat<(v2i64 (vector_shuffle VR128:$src1, (loadv2i64 addr:$src2),
2483 MOVHP_shuffle_mask)),
2484 (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
2486 // Setting the lowest element in the vector.
// Integer MOVL shuffles reuse the movss/movsd register forms defined above.
2487 def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2488 MOVL_shuffle_mask)),
2489 (MOVLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
2490 def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
2491 MOVL_shuffle_mask)),
2492 (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
2494 // vector_shuffle v1, v2 <4, 5, 2, 3> using MOVLPDrr (movsd)
// movsd moves the whole low 64 bits, i.e. two f32/i32 elements at once.
2495 def : Pat<(v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
2496 MOVLP_shuffle_mask)),
2497 (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
2498 def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2499 MOVLP_shuffle_mask)),
2500 (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
2502 // Set lowest element and zero upper elements.
// A zero-merged f64 load viewed as v2i64 is exactly what movq (zero-extending
// 64-bit load) produces.
2503 def : Pat<(bc_v2i64 (vector_shuffle immAllZerosV,
2504 (v2f64 (scalar_to_vector (loadf64 addr:$src))),
2505 MOVL_shuffle_mask)),
2506 (MOVZQI2PQIrm addr:$src)>, Requires<[HasSSE2]>;
2509 // FIXME: Temporary workaround since 2-wide shuffle is broken.
// Map the SSE2 v2f64/v2i64 shuffle/unpack intrinsics directly to instructions
// instead of going through vector_shuffle lowering.
2510 def : Pat<(int_x86_sse2_movs_d VR128:$src1, VR128:$src2),
2511 (v2f64 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
2512 def : Pat<(int_x86_sse2_loadh_pd VR128:$src1, addr:$src2),
2513 (v2f64 (MOVHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
2514 def : Pat<(int_x86_sse2_loadl_pd VR128:$src1, addr:$src2),
2515 (v2f64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
2516 def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, VR128:$src2, imm:$src3),
2517 (v2f64 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$src3))>,
2518 Requires<[HasSSE2]>;
// Memory forms fold the load of the second operand into the instruction.
2519 def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, (load addr:$src2), imm:$src3),
2520 (v2f64 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$src3))>,
2521 Requires<[HasSSE2]>;
2522 def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, VR128:$src2),
2523 (v2f64 (UNPCKHPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
2524 def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, (load addr:$src2)),
2525 (v2f64 (UNPCKHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
2526 def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, VR128:$src2),
2527 (v2f64 (UNPCKLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
2528 def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, (load addr:$src2)),
2529 (v2f64 (UNPCKLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
2530 def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, VR128:$src2),
2531 (v2i64 (PUNPCKHQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
2532 def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, (load addr:$src2)),
2533 (v2i64 (PUNPCKHQDQrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
2534 def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, VR128:$src2),
2535 (v2i64 (PUNPCKLQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
// NOTE(review): this last result lacks the explicit (v2i64 ...) wrapper its
// siblings carry; presumably the type is inferred — confirm it is intentional.
2536 def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, (load addr:$src2)),
2537 (PUNPCKLQDQrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
2539 // 128-bit logical shifts
// Whole-register byte shifts (pslldq/psrldq). PSxLDQ_imm presumably rescales
// the intrinsic's immediate to the instruction's byte count — confirm against
// its definition earlier in the file.
2540 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2541 (v2i64 (PSLLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>,
2542 Requires<[HasSSE2]>;
2543 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2544 (v2i64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>,
2545 Requires<[HasSSE2]>;
2547 // Some special case pandn patterns.
// pandn computes (~src1) & src2; the NOT shows up in the DAG as an xor with
// all-ones. One pattern per element width of immAllOnesV, each bitcast to
// v2i64, plus load-folding variants below.
2548 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
2550 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
2551 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
2553 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
2554 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
2556 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
// Load-folding forms: the AND's second operand comes from memory.
2558 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
2559 (load addr:$src2))),
2560 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
2561 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
2562 (load addr:$src2))),
2563 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
2564 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
2565 (load addr:$src2))),
2566 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
// Unaligned v4f32 load (X86ISD::LOAD_UA) selects movups.
2569 def : Pat<(v4f32 (X86loadu addr:$src)), (MOVUPSrm addr:$src)>,
2570 Requires<[HasSSE1]>;