1 //===-- X86InstrSSE.td - SSE Instruction Set ---------------*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
// OpndItins: pairs the register-register (rr) and register-memory (rm)
// itinerary classes for one operation, plus the new-style SchedModel write
// type the instruction maps to (defaults to WriteFAdd; overridden via
// "let Sched = ..." at the def sites below).
// NOTE(review): closing "}"/">;"/blank lines appear to have been dropped
// from this chunk by extraction — confirm against the upstream file.
16 class OpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm> {
17 InstrItinClass rr = arg_rr;
18 InstrItinClass rm = arg_rm;
19 // InstrSchedModel info.
20 X86FoldableSchedWrite Sched = WriteFAdd;
23 class SizeItins<OpndItins arg_s, OpndItins arg_d> {
// ShiftOpndItins: like OpndItins but adds a register-immediate (ri)
// itinerary class for the shift-by-immediate instruction forms.
29 class ShiftOpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm,
30 InstrItinClass arg_ri> {
31 InstrItinClass rr = arg_rr;
32 InstrItinClass rm = arg_rm;
33 InstrItinClass ri = arg_ri;
// Scalar FP ALU itineraries (f32 and f64), scheduled as WriteFAdd, and the
// SizeItins bundle that groups them for the size-parameterized multiclasses.
38 let Sched = WriteFAdd in {
39 def SSE_ALU_F32S : OpndItins<
40 IIC_SSE_ALU_F32S_RR, IIC_SSE_ALU_F32S_RM
43 def SSE_ALU_F64S : OpndItins<
44 IIC_SSE_ALU_F64S_RR, IIC_SSE_ALU_F64S_RM
48 def SSE_ALU_ITINS_S : SizeItins<
49 SSE_ALU_F32S, SSE_ALU_F64S
// Scalar FP multiply itineraries (f32 and f64), scheduled as WriteFMul,
// plus their SizeItins bundle.
52 let Sched = WriteFMul in {
53 def SSE_MUL_F32S : OpndItins<
// Fix: the RM itinerary previously named IIC_SSE_MUL_F64S_RM — a copy-paste
// from the F64S def below. The F32S def must use the F32S RM class.
54 IIC_SSE_MUL_F32S_RR, IIC_SSE_MUL_F32S_RM
57 def SSE_MUL_F64S : OpndItins<
58 IIC_SSE_MUL_F64S_RR, IIC_SSE_MUL_F64S_RM
62 def SSE_MUL_ITINS_S : SizeItins<
63 SSE_MUL_F32S, SSE_MUL_F64S
// Scalar FP divide itineraries (f32 and f64), scheduled as WriteFDiv,
// plus their SizeItins bundle.
66 let Sched = WriteFDiv in {
67 def SSE_DIV_F32S : OpndItins<
// Fix: the RM itinerary previously named IIC_SSE_DIV_F64S_RM — a copy-paste
// from the F64S def below. The F32S def must use the F32S RM class.
68 IIC_SSE_DIV_F32S_RR, IIC_SSE_DIV_F32S_RM
71 def SSE_DIV_F64S : OpndItins<
72 IIC_SSE_DIV_F64S_RR, IIC_SSE_DIV_F64S_RM
76 def SSE_DIV_ITINS_S : SizeItins<
77 SSE_DIV_F32S, SSE_DIV_F64S
// Packed FP ALU itineraries (v4f32 and v2f64), scheduled as WriteFAdd,
// plus their SizeItins bundle.
81 let Sched = WriteFAdd in {
82 def SSE_ALU_F32P : OpndItins<
83 IIC_SSE_ALU_F32P_RR, IIC_SSE_ALU_F32P_RM
86 def SSE_ALU_F64P : OpndItins<
87 IIC_SSE_ALU_F64P_RR, IIC_SSE_ALU_F64P_RM
91 def SSE_ALU_ITINS_P : SizeItins<
92 SSE_ALU_F32P, SSE_ALU_F64P
// Packed FP multiply itineraries, scheduled as WriteFMul, plus their
// SizeItins bundle.
95 let Sched = WriteFMul in {
96 def SSE_MUL_F32P : OpndItins<
// Fix: the RM itinerary previously named IIC_SSE_MUL_F64P_RM — a copy-paste
// from the F64P def below. The F32P def must use the F32P RM class.
97 IIC_SSE_MUL_F32P_RR, IIC_SSE_MUL_F32P_RM
100 def SSE_MUL_F64P : OpndItins<
101 IIC_SSE_MUL_F64P_RR, IIC_SSE_MUL_F64P_RM
105 def SSE_MUL_ITINS_P : SizeItins<
106 SSE_MUL_F32P, SSE_MUL_F64P
// Packed FP divide itineraries, scheduled as WriteFDiv, plus their
// SizeItins bundle.
109 let Sched = WriteFDiv in {
110 def SSE_DIV_F32P : OpndItins<
// Fix: the RM itinerary previously named IIC_SSE_DIV_F64P_RM — a copy-paste
// from the F64P def below. The F32P def must use the F32P RM class.
111 IIC_SSE_DIV_F32P_RR, IIC_SSE_DIV_F32P_RM
114 def SSE_DIV_F64P : OpndItins<
115 IIC_SSE_DIV_F64P_RR, IIC_SSE_DIV_F64P_RM
119 def SSE_DIV_ITINS_P : SizeItins<
120 SSE_DIV_F32P, SSE_DIV_F64P
// Packed bitwise-logic itineraries. SSE_VEC_BIT_ITINS_P is scheduled as
// WriteVecLogic; SSE_BIT_ITINS_P uses the same itinerary classes but keeps
// the OpndItins default Sched (WriteFAdd).
123 let Sched = WriteVecLogic in
124 def SSE_VEC_BIT_ITINS_P : OpndItins<
125 IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
128 def SSE_BIT_ITINS_P : OpndItins<
129 IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
// Packed integer ALU itineraries (separate class for the 64-bit-element
// "Q" forms), scheduled as WriteVecALU.
132 let Sched = WriteVecALU in {
133 def SSE_INTALU_ITINS_P : OpndItins<
134 IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
137 def SSE_INTALUQ_ITINS_P : OpndItins<
138 IIC_SSE_INTALUQ_P_RR, IIC_SSE_INTALUQ_P_RM
// Packed integer multiply, scheduled as WriteVecIMul.
142 let Sched = WriteVecIMul in
143 def SSE_INTMUL_ITINS_P : OpndItins<
144 IIC_SSE_INTMUL_P_RR, IIC_SSE_INTMUL_P_RM
// Packed integer shifts: rr, rm, and shift-by-immediate (ri) forms.
147 def SSE_INTSHIFT_ITINS_P : ShiftOpndItins<
148 IIC_SSE_INTSH_P_RR, IIC_SSE_INTSH_P_RM, IIC_SSE_INTSH_P_RI
// Aligned (MOVA) and unaligned (MOVU) packed-move itineraries, and the
// DPPD dot-product itinerary.
151 def SSE_MOVA_ITINS : OpndItins<
152 IIC_SSE_MOVA_P_RR, IIC_SSE_MOVA_P_RM
155 def SSE_MOVU_ITINS : OpndItins<
156 IIC_SSE_MOVU_P_RR, IIC_SSE_MOVU_P_RM
159 def SSE_DPPD_ITINS : OpndItins<
160 IIC_SSE_DPPD_RR, IIC_SSE_DPPD_RM
// DPPS dot-product itinerary.
163 def SSE_DPPS_ITINS : OpndItins<
// Fix: the RM itinerary previously referenced the DPPD class
// (IIC_SSE_DPPD_RM); the DPPS def must use IIC_SSE_DPPS_RM.
164 IIC_SSE_DPPS_RR, IIC_SSE_DPPS_RM
// Generic ALU itinerary used when no operation-specific class applies.
167 def DEFAULT_ITINS : OpndItins<
168 IIC_ALU_NONMEM, IIC_ALU_MEM
// EXTRACTPS / INSERTPS itineraries.
171 def SSE_EXTRACT_ITINS : OpndItins<
172 IIC_SSE_EXTRACTPS_RR, IIC_SSE_EXTRACTPS_RM
175 def SSE_INSERT_ITINS : OpndItins<
176 IIC_SSE_INSERTPS_RR, IIC_SSE_INSERTPS_RM
// MPSADBW, scheduled as WriteMPSAD.
179 let Sched = WriteMPSAD in
180 def SSE_MPSADBW_ITINS : OpndItins<
181 IIC_SSE_MPSADBW_RR, IIC_SSE_MPSADBW_RM
// PMULLD, scheduled as WriteVecIMul.
184 let Sched = WriteVecIMul in
185 def SSE_PMULLD_ITINS : OpndItins<
186 IIC_SSE_PMULLD_RR, IIC_SSE_PMULLD_RM
189 // Definitions for backward compatibility.
190 // The instructions mapped on these definitions uses a different itinerary
191 // than the actual scheduling model.
// Each def below pairs the generic IIC_ALU_* itinerary classes (or the
// INTALU classes) with a specific new-model Sched write so instructions can
// migrate to the SchedModel without changing their legacy itinerary.
192 let Sched = WriteShuffle in
193 def DEFAULT_ITINS_SHUFFLESCHED : OpndItins<
194 IIC_ALU_NONMEM, IIC_ALU_MEM
197 let Sched = WriteVecIMul in
198 def DEFAULT_ITINS_VECIMULSCHED : OpndItins<
199 IIC_ALU_NONMEM, IIC_ALU_MEM
202 let Sched = WriteShuffle in
203 def SSE_INTALU_ITINS_SHUFF_P : OpndItins<
204 IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
207 let Sched = WriteMPSAD in
208 def DEFAULT_ITINS_MPSADSCHED : OpndItins<
209 IIC_ALU_NONMEM, IIC_ALU_MEM
212 let Sched = WriteFBlend in
213 def DEFAULT_ITINS_FBLENDSCHED : OpndItins<
214 IIC_ALU_NONMEM, IIC_ALU_MEM
217 let Sched = WriteBlend in
218 def DEFAULT_ITINS_BLENDSCHED : OpndItins<
219 IIC_ALU_NONMEM, IIC_ALU_MEM
222 let Sched = WriteVarBlend in
223 def DEFAULT_ITINS_VARBLENDSCHED : OpndItins<
224 IIC_ALU_NONMEM, IIC_ALU_MEM
227 let Sched = WriteFBlend in
228 def SSE_INTALU_ITINS_FBLEND_P : OpndItins<
229 IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
232 let Sched = WriteBlend in
233 def SSE_INTALU_ITINS_BLEND_P : OpndItins<
234 IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
237 //===----------------------------------------------------------------------===//
238 // SSE 1 & 2 Instructions Classes
239 //===----------------------------------------------------------------------===//
241 /// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
/// Emits the rr and rm forms of a two-operand scalar FP instruction.
/// Is2Addr selects the SSE (two-address, "$src2, $dst") asm string over the
/// AVX three-operand ("$src2, $src1, $dst") form.
242 multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
243 RegisterClass RC, X86MemOperand x86memop,
244 Domain d, OpndItins itins, bit Is2Addr = 1> {
245 let isCommutable = 1 in {
246 def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
248 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
249 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
250 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))], itins.rr, d>,
251 Sched<[itins.Sched]>;
// rm form: folds a load of the second operand.
253 def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
255 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
256 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
257 [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))], itins.rm, d>,
258 Sched<[itins.Sched.Folded, ReadAfterLd]>;
261 /// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
/// Same rr/rm pairing as sse12_fp_scalar, but the patterns match the
/// int_x86_sse<SSEVer>_<OpcodeStr><FPSizeStr> intrinsic, with the name
/// assembled via !strconcat/!cast. isCodeGenOnly: these duplicate the
/// non-intrinsic encodings for isel only.
262 multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
263 string asm, string SSEVer, string FPSizeStr,
264 Operand memopr, ComplexPattern mem_cpat,
265 Domain d, OpndItins itins, bit Is2Addr = 1> {
266 let isCodeGenOnly = 1 in {
267 def rr_Int : SI_Int<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
269 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
270 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
271 [(set RC:$dst, (!cast<Intrinsic>(
272 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
273 RC:$src1, RC:$src2))], itins.rr, d>,
274 Sched<[itins.Sched]>;
// rm_Int form: second operand matched through the mem_cpat complex pattern.
275 def rm_Int : SI_Int<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
277 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
278 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
279 [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
280 SSEVer, "_", OpcodeStr, FPSizeStr))
281 RC:$src1, mem_cpat:$src2))], itins.rm, d>,
282 Sched<[itins.Sched.Folded, ReadAfterLd]>;
286 /// sse12_fp_packed - SSE 1 & 2 packed instructions class
/// Emits the rr and rm forms of a two-operand packed FP instruction for the
/// given vector type vt, loading the memory operand through mem_frag.
287 multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
288 RegisterClass RC, ValueType vt,
289 X86MemOperand x86memop, PatFrag mem_frag,
290 Domain d, OpndItins itins, bit Is2Addr = 1> {
291 let isCommutable = 1 in
292 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
294 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
295 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
296 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>,
297 Sched<[itins.Sched]>;
299 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
301 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
302 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
303 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
305 Sched<[itins.Sched.Folded, ReadAfterLd]>;
308 /// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
/// Variant for the packed logical ops: the caller supplies the full pattern
/// lists (pat_rr / pat_rm) instead of an SDNode, and scheduling is fixed to
/// the vector-logic writes (NoItinerary for the legacy model).
309 multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
310 string OpcodeStr, X86MemOperand x86memop,
311 list<dag> pat_rr, list<dag> pat_rm,
313 let isCommutable = 1, hasSideEffects = 0 in
314 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
316 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
317 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
318 pat_rr, NoItinerary, d>,
319 Sched<[WriteVecLogic]>;
320 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
322 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
323 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
324 pat_rm, NoItinerary, d>,
325 Sched<[WriteVecLogicLd, ReadAfterLd]>;
328 //===----------------------------------------------------------------------===//
329 // Non-instruction patterns
330 //===----------------------------------------------------------------------===//
332 // A vector extract of the first f32/f64 position is a subregister copy
// (no instruction is emitted; the scalar lives in the low lane of the XMM).
333 def : Pat<(f32 (extractelt (v4f32 VR128:$src), (iPTR 0))),
334 (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32)>;
335 def : Pat<(f64 (extractelt (v2f64 VR128:$src), (iPTR 0))),
336 (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64)>;
338 // A 128-bit subvector extract from the first 256-bit vector position
339 // is a subregister copy that needs no instruction.
340 def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (iPTR 0))),
341 (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
342 def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (iPTR 0))),
343 (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;
345 def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (iPTR 0))),
346 (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
347 def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (iPTR 0))),
348 (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;
350 def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (iPTR 0))),
351 (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
352 def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (iPTR 0))),
353 (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;
355 // A 128-bit subvector insert to the first 256-bit vector position
356 // is a subregister copy that needs no instruction.
357 let AddedComplexity = 25 in { // to give priority over vinsertf128rm
358 def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)),
359 (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
360 def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)),
361 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
362 def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)),
363 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
364 def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)),
365 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
366 def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (iPTR 0)),
367 (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
368 def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (iPTR 0)),
369 (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
372 // Implicitly promote a 32-bit scalar to a vector.
// The scalar already sits in the low lane of an XMM register; only a
// register-class change is needed, never an instruction.
373 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
374 (COPY_TO_REGCLASS FR32:$src, VR128)>;
375 def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
376 (COPY_TO_REGCLASS FR32:$src, VR128)>;
377 // Implicitly promote a 64-bit scalar to a vector.
378 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
379 (COPY_TO_REGCLASS FR64:$src, VR128)>;
380 def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
381 (COPY_TO_REGCLASS FR64:$src, VR128)>;
383 // Bitcasts between 128-bit vector types. Return the original type since
384 // no instruction is needed for the conversion
// (all 128-bit vector types share the XMM register file).
385 let Predicates = [HasSSE2] in {
386 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
387 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
388 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
389 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
390 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
391 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
392 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
393 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
394 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
395 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
396 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
397 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
398 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
399 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
400 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
401 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
402 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
403 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
404 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
405 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
406 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
407 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
408 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
409 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
410 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
411 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
412 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
413 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
414 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
415 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
// f128 <-> i128 bitcasts within the FR128 class are likewise free.
416 def : Pat<(f128 (bitconvert (i128 FR128:$src))), (f128 FR128:$src)>;
417 def : Pat<(i128 (bitconvert (f128 FR128:$src))), (i128 FR128:$src)>;
420 // Bitcasts between 256-bit vector types. Return the original type since
421 // no instruction is needed for the conversion
// (all 256-bit vector types share the YMM register file).
422 let Predicates = [HasAVX] in {
423 def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
424 def : Pat<(v4f64 (bitconvert (v8i32 VR256:$src))), (v4f64 VR256:$src)>;
425 def : Pat<(v4f64 (bitconvert (v4i64 VR256:$src))), (v4f64 VR256:$src)>;
426 def : Pat<(v4f64 (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
427 def : Pat<(v4f64 (bitconvert (v32i8 VR256:$src))), (v4f64 VR256:$src)>;
428 def : Pat<(v8f32 (bitconvert (v8i32 VR256:$src))), (v8f32 VR256:$src)>;
429 def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
430 def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
431 def : Pat<(v8f32 (bitconvert (v32i8 VR256:$src))), (v8f32 VR256:$src)>;
432 def : Pat<(v8f32 (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
433 def : Pat<(v4i64 (bitconvert (v8f32 VR256:$src))), (v4i64 VR256:$src)>;
434 def : Pat<(v4i64 (bitconvert (v8i32 VR256:$src))), (v4i64 VR256:$src)>;
435 def : Pat<(v4i64 (bitconvert (v4f64 VR256:$src))), (v4i64 VR256:$src)>;
436 def : Pat<(v4i64 (bitconvert (v32i8 VR256:$src))), (v4i64 VR256:$src)>;
437 def : Pat<(v4i64 (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
438 def : Pat<(v32i8 (bitconvert (v4f64 VR256:$src))), (v32i8 VR256:$src)>;
439 def : Pat<(v32i8 (bitconvert (v4i64 VR256:$src))), (v32i8 VR256:$src)>;
440 def : Pat<(v32i8 (bitconvert (v8f32 VR256:$src))), (v32i8 VR256:$src)>;
441 def : Pat<(v32i8 (bitconvert (v8i32 VR256:$src))), (v32i8 VR256:$src)>;
442 def : Pat<(v32i8 (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
443 def : Pat<(v8i32 (bitconvert (v32i8 VR256:$src))), (v8i32 VR256:$src)>;
444 def : Pat<(v8i32 (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
445 def : Pat<(v8i32 (bitconvert (v8f32 VR256:$src))), (v8i32 VR256:$src)>;
446 def : Pat<(v8i32 (bitconvert (v4i64 VR256:$src))), (v8i32 VR256:$src)>;
447 def : Pat<(v8i32 (bitconvert (v4f64 VR256:$src))), (v8i32 VR256:$src)>;
448 def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))), (v16i16 VR256:$src)>;
449 def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))), (v16i16 VR256:$src)>;
450 def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))), (v16i16 VR256:$src)>;
451 def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))), (v16i16 VR256:$src)>;
452 def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))), (v16i16 VR256:$src)>;
455 // Alias instructions that map fld0 to xorps for sse or vxorps for avx.
456 // This is expanded by ExpandPostRAPseudos.
// Pseudos: rematerializable zero-FP constants, scheduled as WriteZero.
457 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
458 isPseudo = 1, SchedRW = [WriteZero] in {
459 def FsFLD0SS : I<0, Pseudo, (outs FR32:$dst), (ins), "",
460 [(set FR32:$dst, fp32imm0)]>, Requires<[HasSSE1]>;
461 def FsFLD0SD : I<0, Pseudo, (outs FR64:$dst), (ins), "",
462 [(set FR64:$dst, fpimm0)]>, Requires<[HasSSE2]>;
465 //===----------------------------------------------------------------------===//
466 // AVX & SSE - Zero/One Vectors
467 //===----------------------------------------------------------------------===//
469 // Alias instruction that maps zero vector to pxor / xorp* for sse.
470 // This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
471 // swizzled by ExecutionDepsFix to pxor.
472 // We set canFoldAsLoad because this can be converted to a constant-pool
473 // load of an all-zeros value if folding it would be beneficial.
474 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
475 isPseudo = 1, SchedRW = [WriteZero] in {
476 def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
477 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
// All other 128-bit all-zeros types map to the same pseudo.
480 def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
481 def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
482 def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
483 def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
484 def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
487 // The same as done above but for AVX. The 256-bit AVX1 ISA doesn't support PI,
488 // and doesn't need it because on sandy bridge the register is set to zero
489 // at the rename stage without using any execution unit, so SET0PSY
490 // and SET0PDY can be used for vector int instructions without penalty
491 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
492 isPseudo = 1, Predicates = [HasAVX], SchedRW = [WriteZero] in {
493 def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "",
494 [(set VR256:$dst, (v8f32 immAllZerosV))]>;
// v4f64 zero also maps to AVX_SET0 under AVX1.
497 let Predicates = [HasAVX] in
498 def : Pat<(v4f64 immAllZerosV), (AVX_SET0)>;
// 256-bit integer zeros need AVX2 (integer ops on YMM).
500 let Predicates = [HasAVX2] in {
501 def : Pat<(v4i64 immAllZerosV), (AVX_SET0)>;
502 def : Pat<(v8i32 immAllZerosV), (AVX_SET0)>;
503 def : Pat<(v16i16 immAllZerosV), (AVX_SET0)>;
504 def : Pat<(v32i8 immAllZerosV), (AVX_SET0)>;
507 // AVX1 has no support for 256-bit integer instructions, but since the 128-bit
508 // VPXOR instruction writes zero to its upper part, it's safe build zeros.
// Each 256-bit integer zero is built by placing a 128-bit V_SET0 into the
// low subregister with SUBREG_TO_REG (upper half is implicitly zeroed).
509 let Predicates = [HasAVX1Only] in {
510 def : Pat<(v32i8 immAllZerosV), (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
511 def : Pat<(bc_v32i8 (v8f32 immAllZerosV)),
512 (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
514 def : Pat<(v16i16 immAllZerosV), (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;
515 def : Pat<(bc_v16i16 (v8f32 immAllZerosV)),
516 (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;
518 def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;
519 def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
520 (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;
522 def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
523 def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
524 (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
527 // We set canFoldAsLoad because this can be converted to a constant-pool
528 // load of an all-ones value if folding it would be beneficial.
// All-ones pseudos (expanded to pcmpeqd / vpcmpeqd post-RA).
529 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
530 isPseudo = 1, SchedRW = [WriteZero] in {
531 def V_SETALLONES : I<0, Pseudo, (outs VR128:$dst), (ins), "",
532 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
533 let Predicates = [HasAVX2] in
534 def AVX2_SETALLONES : I<0, Pseudo, (outs VR256:$dst), (ins), "",
535 [(set VR256:$dst, (v8i32 immAllOnesV))]>;
539 //===----------------------------------------------------------------------===//
540 // SSE 1 & 2 - Move FP Scalar Instructions
542 // Move Instructions. Register-to-register movss/movsd is not used for FR32/64
543 // register copies because it's a partial register update; Register-to-register
544 // movss/movsd is not modeled as an INSERT_SUBREG because INSERT_SUBREG requires
545 // that the insert be implementable in terms of a copy, and just mentioned, we
546 // don't use movss/movsd for copies.
547 //===----------------------------------------------------------------------===//
/// sse12_move_rr - register-to-register MOVSS/MOVSD forms. The rr def
/// merges the scalar RC:$src2 into the low element of VR128:$src1; the
/// rr_REV def is the store-direction encoding, kept only so the
/// disassembler can round-trip it (no pattern, no side effects).
549 multiclass sse12_move_rr<RegisterClass RC, SDNode OpNode, ValueType vt,
550 X86MemOperand x86memop, string base_opc,
551 string asm_opr, Domain d = GenericDomain> {
552 def rr : SI<0x10, MRMSrcReg, (outs VR128:$dst),
553 (ins VR128:$src1, RC:$src2),
554 !strconcat(base_opc, asm_opr),
555 [(set VR128:$dst, (vt (OpNode VR128:$src1,
556 (scalar_to_vector RC:$src2))))],
557 IIC_SSE_MOV_S_RR, d>, Sched<[WriteFShuffle]>;
559 // For the disassembler
560 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
561 def rr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
562 (ins VR128:$src1, RC:$src2),
563 !strconcat(base_opc, asm_opr),
564 [], IIC_SSE_MOV_S_RR>, Sched<[WriteFShuffle]>;
/// sse12_move - instantiates both the AVX (V-prefixed, three-operand) and
/// the SSE (two-address, "$src1 = $dst" constrained) register moves plus
/// their register-to-memory store forms.
567 multiclass sse12_move<RegisterClass RC, SDNode OpNode, ValueType vt,
568 X86MemOperand x86memop, string OpcodeStr,
569 Domain d = GenericDomain> {
// AVX variants.
571 defm V#NAME : sse12_move_rr<RC, OpNode, vt, x86memop, OpcodeStr,
572 "\t{$src2, $src1, $dst|$dst, $src1, $src2}", d>,
575 def V#NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
576 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
577 [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR, d>,
578 VEX, VEX_LIG, Sched<[WriteStore]>;
// SSE variants (two-address form).
580 let Constraints = "$src1 = $dst" in {
581 defm NAME : sse12_move_rr<RC, OpNode, vt, x86memop, OpcodeStr,
582 "\t{$src2, $dst|$dst, $src2}", d>;
585 def NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
586 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
587 [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR, d>,
591 // Loading from memory automatically zeroing upper bits.
/// sse12_move_rm - memory-to-register MOVSS/MOVSD forms, AVX (V-prefixed)
/// and SSE variants.
592 multiclass sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
593 PatFrag mem_pat, string OpcodeStr,
594 Domain d = GenericDomain> {
595 def V#NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
596 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
597 [(set RC:$dst, (mem_pat addr:$src))],
598 IIC_SSE_MOV_S_RM, d>, VEX, VEX_LIG, Sched<[WriteLoad]>;
599 def NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
600 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
601 [(set RC:$dst, (mem_pat addr:$src))],
602 IIC_SSE_MOV_S_RM, d>, Sched<[WriteLoad]>;
// Instantiate MOVSS (XS prefix) and MOVSD (XD prefix) register/store and
// rematerializable load forms.
605 defm MOVSS : sse12_move<FR32, X86Movss, v4f32, f32mem, "movss",
606 SSEPackedSingle>, XS;
607 defm MOVSD : sse12_move<FR64, X86Movsd, v2f64, f64mem, "movsd",
608 SSEPackedDouble>, XD;
610 let canFoldAsLoad = 1, isReMaterializable = 1 in {
611 defm MOVSS : sse12_move_rm<FR32, f32mem, loadf32, "movss",
612 SSEPackedSingle>, XS;
614 let AddedComplexity = 20 in
615 defm MOVSD : sse12_move_rm<FR64, f64mem, loadf64, "movsd",
616 SSEPackedDouble>, XD;
// Selection patterns for the AVX scalar moves (VMOVSS/VMOVSD): zero-extending
// loads, extract-and-store, and shuffles lowered to the register move.
620 let Predicates = [UseAVX] in {
621 let AddedComplexity = 20 in {
622 // MOVSSrm zeros the high parts of the register; represent this
623 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
624 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
625 (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
626 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
627 (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
628 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
629 (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
631 // MOVSDrm zeros the high parts of the register; represent this
632 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
633 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
634 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
635 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
636 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
637 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
638 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
639 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
640 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
641 def : Pat<(v2f64 (X86vzload addr:$src)),
642 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
644 // Represent the same patterns above but in the form they appear for
// 256-bit types (load into the low 128 bits, zero the rest).
646 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
647 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
648 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
649 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
650 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
651 (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
654 // Extract and store.
655 def : Pat<(store (f32 (extractelt (v4f32 VR128:$src), (iPTR 0))),
657 (VMOVSSmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32))>;
658 def : Pat<(store (f64 (extractelt (v2f64 VR128:$src), (iPTR 0))),
660 (VMOVSDmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64))>;
662 // Shuffle with VMOVSS
663 def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
664 (VMOVSSrr (v4i32 VR128:$src1),
665 (COPY_TO_REGCLASS (v4i32 VR128:$src2), FR32))>;
666 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
667 (VMOVSSrr (v4f32 VR128:$src1),
668 (COPY_TO_REGCLASS (v4f32 VR128:$src2), FR32))>;
// 256-bit Movss: operate on the low 128-bit lanes, upper half from src1.
671 def : Pat<(v8i32 (X86Movss VR256:$src1, VR256:$src2)),
672 (SUBREG_TO_REG (i32 0),
673 (VMOVSSrr (EXTRACT_SUBREG (v8i32 VR256:$src1), sub_xmm),
674 (EXTRACT_SUBREG (v8i32 VR256:$src2), sub_xmm)),
676 def : Pat<(v8f32 (X86Movss VR256:$src1, VR256:$src2)),
677 (SUBREG_TO_REG (i32 0),
678 (VMOVSSrr (EXTRACT_SUBREG (v8f32 VR256:$src1), sub_xmm),
679 (EXTRACT_SUBREG (v8f32 VR256:$src2), sub_xmm)),
682 // Shuffle with VMOVSD
683 def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
684 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
685 def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
686 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
687 def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
688 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
689 def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
690 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
// 256-bit Movsd: same low-lane trick as Movss above.
693 def : Pat<(v4i64 (X86Movsd VR256:$src1, VR256:$src2)),
694 (SUBREG_TO_REG (i32 0),
695 (VMOVSDrr (EXTRACT_SUBREG (v4i64 VR256:$src1), sub_xmm),
696 (EXTRACT_SUBREG (v4i64 VR256:$src2), sub_xmm)),
698 def : Pat<(v4f64 (X86Movsd VR256:$src1, VR256:$src2)),
699 (SUBREG_TO_REG (i32 0),
700 (VMOVSDrr (EXTRACT_SUBREG (v4f64 VR256:$src1), sub_xmm),
701 (EXTRACT_SUBREG (v4f64 VR256:$src2), sub_xmm)),
704 // FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
705 // is during lowering, where it's not possible to recognize the fold because
706 // it has two uses through a bitcast. One use disappears at isel time and the
707 // fold opportunity reappears.
708 def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
709 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
710 def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
711 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
712 def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
713 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
714 def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
715 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
// SSE1 selection patterns for MOVSS: zero-extension via V_SET0 + MOVSS,
// zero-extending loads, extract-and-store, and shuffles.
718 let Predicates = [UseSSE1] in {
719 let Predicates = [NoSSE41], AddedComplexity = 15 in {
720 // Move scalar to XMM zero-extended, zeroing a VR128 then do a
721 // MOVSS to the lower bits.
722 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
723 (MOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
724 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
725 (MOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
726 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
727 (MOVSSrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
730 let AddedComplexity = 20 in {
731 // MOVSSrm already zeros the high parts of the register.
732 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
733 (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
734 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
735 (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
736 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
737 (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
740 // Extract and store.
741 def : Pat<(store (f32 (extractelt (v4f32 VR128:$src), (iPTR 0))),
743 (MOVSSmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR32))>;
745 // Shuffle with MOVSS
746 def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
747 (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
748 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
749 (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
// SSE2 selection patterns for MOVSD, mirroring the MOVSS set above.
752 let Predicates = [UseSSE2] in {
753 let Predicates = [NoSSE41], AddedComplexity = 15 in {
754 // Move scalar to XMM zero-extended, zeroing a VR128 then do a
755 // MOVSD to the lower bits.
756 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
757 (MOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
760 let AddedComplexity = 20 in {
761 // MOVSDrm already zeros the high parts of the register.
762 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
763 (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
764 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
765 (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
766 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
767 (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
768 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
769 (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
770 def : Pat<(v2f64 (X86vzload addr:$src)),
771 (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
774 // Extract and store.
775 def : Pat<(store (f64 (extractelt (v2f64 VR128:$src), (iPTR 0))),
777 (MOVSDmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR64))>;
779 // Shuffle with MOVSD
780 def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
781 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
782 def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
783 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
784 def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
785 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
786 def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
787 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
789 // FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
790 // is during lowering, where it's not possible to recognize the fold because
791 // it has two uses through a bitcast. One use disappears at isel time and the
792 // fold opportunity reappears.
793 def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
794 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
795 def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
796 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
797 def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
798 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
799 def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
800 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
803 //===----------------------------------------------------------------------===//
804 // SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
805 //===----------------------------------------------------------------------===//
// Multiclass for aligned/unaligned packed-FP register moves and loads.
// Emits a reg-reg form (rr, no pattern, side-effect free) and a memory form
// (rm) that folds/rematerializes the load.  The itineraries parameter
// (`OpndItins itins`) is declared on a line elided from this listing
// (numbering gap 809->811) — confirm against upstream X86InstrSSE.td.
807 multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
808 X86MemOperand x86memop, PatFrag ld_frag,
809 string asm, Domain d,
811 bit IsReMaterializable = 1> {
812 let hasSideEffects = 0 in
813 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
814 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], itins.rr, d>,
815 Sched<[WriteFShuffle]>;
// Loads may be folded and rematerialized; IsReMaterializable is disabled
// by some movupd instantiations below.
816 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
817 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
818 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
819 [(set RC:$dst, (ld_frag addr:$src))], itins.rm, d>,
// Instantiations of sse12_mov_packed: AVX 128/256-bit (V-prefixed, guarded
// by NoVLX so EVEX forms win under AVX-512VL) and legacy SSE1/SSE2 forms.
// Trailing adornments (VEX, VEX_L, etc.) fall on lines elided from this
// listing — confirm against upstream.
823 let Predicates = [HasAVX, NoVLX] in {
824 defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
825 "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
827 defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
828 "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
830 defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
831 "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
// movupd passes IsReMaterializable = 0 (the trailing 0 argument).
833 defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
834 "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
837 defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
838 "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
840 defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
841 "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
843 defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
844 "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
846 defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
847 "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
// Legacy SSE forms; Use* predicates exclude these when AVX is available.
851 let Predicates = [UseSSE1] in {
852 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
853 "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
855 defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
856 "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
859 let Predicates = [UseSSE2] in {
860 defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
861 "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
863 defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
864 "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
// AVX packed-FP store instructions (reg -> mem).  Aligned forms (opcode 0x29)
// use the alignedstore/alignedstore256 PatFrags; unaligned forms (0x11) use
// plain store.  Y-suffixed defs are the 256-bit VEX_L encodings.
868 let SchedRW = [WriteStore], Predicates = [HasAVX, NoVLX] in {
869 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
870 "movaps\t{$src, $dst|$dst, $src}",
871 [(alignedstore (v4f32 VR128:$src), addr:$dst)],
872 IIC_SSE_MOVA_P_MR>, VEX;
873 def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
874 "movapd\t{$src, $dst|$dst, $src}",
875 [(alignedstore (v2f64 VR128:$src), addr:$dst)],
876 IIC_SSE_MOVA_P_MR>, VEX;
877 def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
878 "movups\t{$src, $dst|$dst, $src}",
879 [(store (v4f32 VR128:$src), addr:$dst)],
880 IIC_SSE_MOVU_P_MR>, VEX;
881 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
882 "movupd\t{$src, $dst|$dst, $src}",
883 [(store (v2f64 VR128:$src), addr:$dst)],
884 IIC_SSE_MOVU_P_MR>, VEX;
885 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
886 "movaps\t{$src, $dst|$dst, $src}",
887 [(alignedstore256 (v8f32 VR256:$src), addr:$dst)],
888 IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
889 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
890 "movapd\t{$src, $dst|$dst, $src}",
891 [(alignedstore256 (v4f64 VR256:$src), addr:$dst)],
892 IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
893 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
894 "movups\t{$src, $dst|$dst, $src}",
895 [(store (v8f32 VR256:$src), addr:$dst)],
896 IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
897 def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
898 "movupd\t{$src, $dst|$dst, $src}",
899 [(store (v4f64 VR256:$src), addr:$dst)],
900 IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
// Reversed-operand (store-form opcode, reg-reg) encodings kept only so the
// disassembler can round-trip them: isCodeGenOnly + ForceDisassemble, with
// empty patterns.  The (ins …:$src) operand lines are elided from this
// listing (e.g. numbering gap 906->908).
904 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
905 SchedRW = [WriteFShuffle] in {
906 def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst),
908 "movaps\t{$src, $dst|$dst, $src}", [],
909 IIC_SSE_MOVA_P_RR>, VEX;
910 def VMOVAPDrr_REV : VPDI<0x29, MRMDestReg, (outs VR128:$dst),
912 "movapd\t{$src, $dst|$dst, $src}", [],
913 IIC_SSE_MOVA_P_RR>, VEX;
914 def VMOVUPSrr_REV : VPSI<0x11, MRMDestReg, (outs VR128:$dst),
916 "movups\t{$src, $dst|$dst, $src}", [],
917 IIC_SSE_MOVU_P_RR>, VEX;
918 def VMOVUPDrr_REV : VPDI<0x11, MRMDestReg, (outs VR128:$dst),
920 "movupd\t{$src, $dst|$dst, $src}", [],
921 IIC_SSE_MOVU_P_RR>, VEX;
922 def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst),
924 "movaps\t{$src, $dst|$dst, $src}", [],
925 IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
926 def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst),
928 "movapd\t{$src, $dst|$dst, $src}", [],
929 IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
930 def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst),
932 "movups\t{$src, $dst|$dst, $src}", [],
933 IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
934 def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst),
936 "movupd\t{$src, $dst|$dst, $src}", [],
937 IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
// Lower the 256-bit unaligned-store intrinsics directly to VMOVUP*Ymr.
940 def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
941 (VMOVUPSYmr addr:$dst, VR256:$src)>;
942 def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
943 (VMOVUPDYmr addr:$dst, VR256:$src)>;
// Legacy SSE packed-FP stores and their disassembler-only reversed reg-reg
// forms.  Itinerary arguments (IIC_SSE_MOV?_P_MR/RR) and closing braces fall
// on lines elided from this listing (e.g. numbering gaps 948->950, 968->970).
945 let SchedRW = [WriteStore] in {
946 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
947 "movaps\t{$src, $dst|$dst, $src}",
948 [(alignedstore (v4f32 VR128:$src), addr:$dst)],
950 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
951 "movapd\t{$src, $dst|$dst, $src}",
952 [(alignedstore (v2f64 VR128:$src), addr:$dst)],
954 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
955 "movups\t{$src, $dst|$dst, $src}",
956 [(store (v4f32 VR128:$src), addr:$dst)],
958 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
959 "movupd\t{$src, $dst|$dst, $src}",
960 [(store (v2f64 VR128:$src), addr:$dst)],
// Reversed-operand reg-reg encodings, for disassembly round-tripping only.
965 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
966 SchedRW = [WriteFShuffle] in {
967 def MOVAPSrr_REV : PSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
968 "movaps\t{$src, $dst|$dst, $src}", [],
970 def MOVAPDrr_REV : PDI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
971 "movapd\t{$src, $dst|$dst, $src}", [],
973 def MOVUPSrr_REV : PSI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
974 "movups\t{$src, $dst|$dst, $src}", [],
976 def MOVUPDrr_REV : PDI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
977 "movupd\t{$src, $dst|$dst, $src}", [],
// 128-bit unaligned-store intrinsics: select the VEX-encoded forms under
// AVX, otherwise the legacy MOVUP* forms under the matching UseSSE* level.
981 let Predicates = [HasAVX] in {
982 def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
983 (VMOVUPSmr addr:$dst, VR128:$src)>;
984 def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
985 (VMOVUPDmr addr:$dst, VR128:$src)>;
988 let Predicates = [UseSSE1] in
989 def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
990 (MOVUPSmr addr:$dst, VR128:$src)>;
991 let Predicates = [UseSSE2] in
992 def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
993 (MOVUPDmr addr:$dst, VR128:$src)>;
995 // Use vmovaps/vmovups for AVX integer load/store.
996 let Predicates = [HasAVX, NoVLX] in {
997 // 128-bit load/store
// All 128-bit integer element types share the v2i64 load node after
// legalization, so one load pattern each suffices; stores are matched per
// element type below.
998 def : Pat<(alignedloadv2i64 addr:$src),
999 (VMOVAPSrm addr:$src)>;
1000 def : Pat<(loadv2i64 addr:$src),
1001 (VMOVUPSrm addr:$src)>;
1003 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
1004 (VMOVAPSmr addr:$dst, VR128:$src)>;
1005 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
1006 (VMOVAPSmr addr:$dst, VR128:$src)>;
1007 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
1008 (VMOVAPSmr addr:$dst, VR128:$src)>;
1009 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
1010 (VMOVAPSmr addr:$dst, VR128:$src)>;
1011 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
1012 (VMOVUPSmr addr:$dst, VR128:$src)>;
1013 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
1014 (VMOVUPSmr addr:$dst, VR128:$src)>;
1015 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
1016 (VMOVUPSmr addr:$dst, VR128:$src)>;
1017 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
1018 (VMOVUPSmr addr:$dst, VR128:$src)>;
1020 // 256-bit load/store
1021 def : Pat<(alignedloadv4i64 addr:$src),
1022 (VMOVAPSYrm addr:$src)>;
1023 def : Pat<(loadv4i64 addr:$src),
1024 (VMOVUPSYrm addr:$src)>;
1025 def : Pat<(alignedstore256 (v4i64 VR256:$src), addr:$dst),
1026 (VMOVAPSYmr addr:$dst, VR256:$src)>;
1027 def : Pat<(alignedstore256 (v8i32 VR256:$src), addr:$dst),
1028 (VMOVAPSYmr addr:$dst, VR256:$src)>;
1029 def : Pat<(alignedstore256 (v16i16 VR256:$src), addr:$dst),
1030 (VMOVAPSYmr addr:$dst, VR256:$src)>;
1031 def : Pat<(alignedstore256 (v32i8 VR256:$src), addr:$dst),
1032 (VMOVAPSYmr addr:$dst, VR256:$src)>;
1033 def : Pat<(store (v4i64 VR256:$src), addr:$dst),
1034 (VMOVUPSYmr addr:$dst, VR256:$src)>;
1035 def : Pat<(store (v8i32 VR256:$src), addr:$dst),
1036 (VMOVUPSYmr addr:$dst, VR256:$src)>;
1037 def : Pat<(store (v16i16 VR256:$src), addr:$dst),
1038 (VMOVUPSYmr addr:$dst, VR256:$src)>;
1039 def : Pat<(store (v32i8 VR256:$src), addr:$dst),
1040 (VMOVUPSYmr addr:$dst, VR256:$src)>;
1042 // Special patterns for storing subvector extracts of lower 128-bits
1043 // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
// Each pattern stores the low xmm half of a ymm register via
// EXTRACT_SUBREG sub_xmm plus a plain 128-bit (V)MOV store.
1044 def : Pat<(alignedstore (v2f64 (extract_subvector
1045 (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
1046 (VMOVAPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1047 def : Pat<(alignedstore (v4f32 (extract_subvector
1048 (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
1049 (VMOVAPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1050 def : Pat<(alignedstore (v2i64 (extract_subvector
1051 (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
1052 (VMOVAPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1053 def : Pat<(alignedstore (v4i32 (extract_subvector
1054 (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
1055 (VMOVAPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1056 def : Pat<(alignedstore (v8i16 (extract_subvector
1057 (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
1058 (VMOVAPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1059 def : Pat<(alignedstore (v16i8 (extract_subvector
1060 (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
1061 (VMOVAPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
// Unaligned variants of the same lower-half stores.
1063 def : Pat<(store (v2f64 (extract_subvector
1064 (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
1065 (VMOVUPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1066 def : Pat<(store (v4f32 (extract_subvector
1067 (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
1068 (VMOVUPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1069 def : Pat<(store (v2i64 (extract_subvector
1070 (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
1071 (VMOVUPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1072 def : Pat<(store (v4i32 (extract_subvector
1073 (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
1074 (VMOVUPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1075 def : Pat<(store (v8i16 (extract_subvector
1076 (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
1077 (VMOVUPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1078 def : Pat<(store (v16i8 (extract_subvector
1079 (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
1080 (VMOVUPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1083 // Use movaps / movups for SSE integer load / store (one byte shorter).
1084 // The instructions selected below are then converted to MOVDQA/MOVDQU
1085 // during the SSE domain pass.
1086 let Predicates = [UseSSE1] in {
// Integer vector loads share the v2i64 node; stores are matched per
// element type, all funneled to the PS-domain move.
1087 def : Pat<(alignedloadv2i64 addr:$src),
1088 (MOVAPSrm addr:$src)>;
1089 def : Pat<(loadv2i64 addr:$src),
1090 (MOVUPSrm addr:$src)>;
1092 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
1093 (MOVAPSmr addr:$dst, VR128:$src)>;
1094 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
1095 (MOVAPSmr addr:$dst, VR128:$src)>;
1096 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
1097 (MOVAPSmr addr:$dst, VR128:$src)>;
1098 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
1099 (MOVAPSmr addr:$dst, VR128:$src)>;
1100 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
1101 (MOVUPSmr addr:$dst, VR128:$src)>;
1102 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
1103 (MOVUPSmr addr:$dst, VR128:$src)>;
1104 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
1105 (MOVUPSmr addr:$dst, VR128:$src)>;
1106 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
1107 (MOVUPSmr addr:$dst, VR128:$src)>;
1110 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1111 // bits are disregarded. FIXME: Set encoding to pseudo!
1112 let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
1113 let isCodeGenOnly = 1 in {
// VEX-encoded forms first, then the legacy SSE forms.  The itinerary lines
// of the last two defs are elided from this listing (gaps 1124->1126,
// 1128 onward).
1114 def FsVMOVAPSrm : VPSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1115 "movaps\t{$src, $dst|$dst, $src}",
1116 [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
1117 IIC_SSE_MOVA_P_RM>, VEX;
1118 def FsVMOVAPDrm : VPDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1119 "movapd\t{$src, $dst|$dst, $src}",
1120 [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
1121 IIC_SSE_MOVA_P_RM>, VEX;
1122 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1123 "movaps\t{$src, $dst|$dst, $src}",
1124 [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
1126 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1127 "movapd\t{$src, $dst|$dst, $src}",
1128 [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
1133 //===----------------------------------------------------------------------===//
1134 // SSE 1 & 2 - Move Low packed FP Instructions
1135 //===----------------------------------------------------------------------===//
// Multiclasses for MOVLPS/MOVLPD/MOVHPS/MOVHPD: a packed-single (PSrm) and a
// packed-double (PDrm) memory form, each merging a loaded f64 into one half
// of $src1 via the given shuffle node.
1137 multiclass sse12_mov_hilo_packed_base<bits<8>opc, SDNode psnode, SDNode pdnode,
1138 string base_opc, string asm_opr,
1139 InstrItinClass itin> {
1140 def PSrm : PI<opc, MRMSrcMem,
1141 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
1142 !strconcat(base_opc, "s", asm_opr),
// The (set VR128:$dst, (v4f32 …)) wrapper line is elided from this listing
// (numbering gap 1142->1144).
1144 (psnode VR128:$src1,
1145 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
1146 itin, SSEPackedSingle>, PS,
1147 Sched<[WriteFShuffleLd, ReadAfterLd]>;
1149 def PDrm : PI<opc, MRMSrcMem,
1150 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
1151 !strconcat(base_opc, "d", asm_opr),
1152 [(set VR128:$dst, (v2f64 (pdnode VR128:$src1,
1153 (scalar_to_vector (loadf64 addr:$src2)))))],
1154 itin, SSEPackedDouble>, PD,
1155 Sched<[WriteFShuffleLd, ReadAfterLd]>;
// Wrapper that instantiates both the VEX (3-operand) and legacy (tied
// $src1 = $dst, 2-operand) assembly forms.
1159 multiclass sse12_mov_hilo_packed<bits<8>opc, SDNode psnode, SDNode pdnode,
1160 string base_opc, InstrItinClass itin> {
1161 let Predicates = [UseAVX] in
1162 defm V#NAME : sse12_mov_hilo_packed_base<opc, psnode, pdnode, base_opc,
1163 "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1166 let Constraints = "$src1 = $dst" in
1167 defm NAME : sse12_mov_hilo_packed_base<opc, psnode, pdnode, base_opc,
1168 "\t{$src2, $dst|$dst, $src2}",
// Instantiate the move-low family (opcode 0x12).
1172 let AddedComplexity = 20 in {
1173 defm MOVL : sse12_mov_hilo_packed<0x12, X86Movlps, X86Movlpd, "movlp",
// MOVLPS/MOVLPD stores (opcode 0x13): store the low 64 bits of an xmm
// register.  The PS form bitcasts v4f32 to v2f64 to extract element 0.
1177 let SchedRW = [WriteStore] in {
1178 let Predicates = [UseAVX] in {
1179 def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1180 "movlps\t{$src, $dst|$dst, $src}",
1181 [(store (f64 (extractelt (bc_v2f64 (v4f32 VR128:$src)),
1182 (iPTR 0))), addr:$dst)],
1183 IIC_SSE_MOV_LH>, VEX;
1184 def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1185 "movlpd\t{$src, $dst|$dst, $src}",
1186 [(store (f64 (extractelt (v2f64 VR128:$src),
1187 (iPTR 0))), addr:$dst)],
1188 IIC_SSE_MOV_LH>, VEX;
1190 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1191 "movlps\t{$src, $dst|$dst, $src}",
1192 [(store (f64 (extractelt (bc_v2f64 (v4f32 VR128:$src)),
1193 (iPTR 0))), addr:$dst)],
1195 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1196 "movlpd\t{$src, $dst|$dst, $src}",
1197 [(store (f64 (extractelt (v2f64 VR128:$src),
1198 (iPTR 0))), addr:$dst)],
// Selection patterns mapping low-half merge shuffles (and the corresponding
// shuffle+store forms) onto (V)MOVLPS/(V)MOVLPD, split per predicate level.
1202 let Predicates = [UseAVX] in {
1203 // Shuffle with VMOVLPS
1204 def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
1205 (VMOVLPSrm VR128:$src1, addr:$src2)>;
1206 def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
1207 (VMOVLPSrm VR128:$src1, addr:$src2)>;
1209 // Shuffle with VMOVLPD
1210 def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
1211 (VMOVLPDrm VR128:$src1, addr:$src2)>;
1212 def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
1213 (VMOVLPDrm VR128:$src1, addr:$src2)>;
// A movsd of a loaded scalar is the same operation as movlpd from memory.
1214 def : Pat<(v2f64 (X86Movsd VR128:$src1,
1215 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
1216 (VMOVLPDrm VR128:$src1, addr:$src2)>;
// Store-form patterns: shuffle-into-loaded-value followed by a store to the
// same address collapses to a single MOVLP store.  The trailing addr:$src1
// operands of some patterns are on lines elided from this listing.
1219 def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
1221 (VMOVLPSmr addr:$src1, VR128:$src2)>;
1222 def : Pat<(store (v4i32 (X86Movlps
1223 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
1224 (VMOVLPSmr addr:$src1, VR128:$src2)>;
1225 def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
1227 (VMOVLPDmr addr:$src1, VR128:$src2)>;
1228 def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
1230 (VMOVLPDmr addr:$src1, VR128:$src2)>;
1233 let Predicates = [UseSSE1] in {
1234 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
1235 def : Pat<(store (i64 (extractelt (bc_v2i64 (v4f32 VR128:$src2)),
1236 (iPTR 0))), addr:$src1),
1237 (MOVLPSmr addr:$src1, VR128:$src2)>;
1239 // Shuffle with MOVLPS
1240 def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
1241 (MOVLPSrm VR128:$src1, addr:$src2)>;
1242 def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
1243 (MOVLPSrm VR128:$src1, addr:$src2)>;
1244 def : Pat<(X86Movlps VR128:$src1,
1245 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
1246 (MOVLPSrm VR128:$src1, addr:$src2)>;
1249 def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
1251 (MOVLPSmr addr:$src1, VR128:$src2)>;
1252 def : Pat<(store (v4i32 (X86Movlps
1253 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
1255 (MOVLPSmr addr:$src1, VR128:$src2)>;
1258 let Predicates = [UseSSE2] in {
1259 // Shuffle with MOVLPD
1260 def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
1261 (MOVLPDrm VR128:$src1, addr:$src2)>;
1262 def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
1263 (MOVLPDrm VR128:$src1, addr:$src2)>;
1264 def : Pat<(v2f64 (X86Movsd VR128:$src1,
1265 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
1266 (MOVLPDrm VR128:$src1, addr:$src2)>;
1269 def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
1271 (MOVLPDmr addr:$src1, VR128:$src2)>;
1272 def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
1274 (MOVLPDmr addr:$src1, VR128:$src2)>;
1277 //===----------------------------------------------------------------------===//
1278 // SSE 1 & 2 - Move Hi packed FP Instructions
1279 //===----------------------------------------------------------------------===//
// Instantiate the move-high family (opcode 0x16) from the same hilo
// multiclass used for MOVL above.
1281 let AddedComplexity = 20 in {
1282 defm MOVH : sse12_mov_hilo_packed<0x16, X86Movlhps, X86Movlhpd, "movhp",
1286 let SchedRW = [WriteStore] in {
1287 // v2f64 extract element 1 is always custom lowered to unpack high to low
1288 // and extract element 0 so the non-store version isn't too horrible.
1289 let Predicates = [UseAVX] in {
// MOVHPS/MOVHPD stores (opcode 0x17): the high 64 bits are brought low with
// an X86Unpckh before the f64 element-0 extract+store.
1290 def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1291 "movhps\t{$src, $dst|$dst, $src}",
1292 [(store (f64 (extractelt
1293 (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
1294 (bc_v2f64 (v4f32 VR128:$src))),
1295 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
1296 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1297 "movhpd\t{$src, $dst|$dst, $src}",
1298 [(store (f64 (extractelt
1299 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
1300 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
1302 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1303 "movhps\t{$src, $dst|$dst, $src}",
1304 [(store (f64 (extractelt
1305 (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
1306 (bc_v2f64 (v4f32 VR128:$src))),
1307 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
1308 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1309 "movhpd\t{$src, $dst|$dst, $src}",
1310 [(store (f64 (extractelt
1311 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
1312 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
// Selection patterns mapping high-half merge shuffles onto (V)MOVHPS /
// (V)MOVHPD loads and stores, split per predicate level.
1315 let Predicates = [UseAVX] in {
1317 def : Pat<(X86Movlhps VR128:$src1,
1318 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
1319 (VMOVHPSrm VR128:$src1, addr:$src2)>;
// NOTE(review): this pattern uses bc_v4i32 while the UseSSE1 equivalent
// below uses bc_v4f32 — confirm against upstream whether both forms are
// intended to be matched here.
1320 def : Pat<(X86Movlhps VR128:$src1,
1321 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
1322 (VMOVHPSrm VR128:$src1, addr:$src2)>;
1326 // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
1327 // is during lowering, where it's not possible to recognize the load fold
1328 // because it has two uses through a bitcast. One use disappears at isel time
1329 // and the fold opportunity reappears.
1330 def : Pat<(v2f64 (X86Unpckl VR128:$src1,
1331 (scalar_to_vector (loadf64 addr:$src2)))),
1332 (VMOVHPDrm VR128:$src1, addr:$src2)>;
1333 // Also handle an i64 load because that may get selected as a faster way to
1335 def : Pat<(v2f64 (X86Unpckl VR128:$src1,
1336 (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
1337 (VMOVHPDrm VR128:$src1, addr:$src2)>;
// Storing the high f64 element (lane 1 via VPERMILPD imm 1) is a MOVHPD store.
1339 def : Pat<(store (f64 (extractelt
1340 (v2f64 (X86VPermilpi VR128:$src, (i8 1))),
1341 (iPTR 0))), addr:$dst),
1342 (VMOVHPDmr addr:$dst, VR128:$src)>;
1345 let Predicates = [UseSSE1] in {
1347 def : Pat<(X86Movlhps VR128:$src1,
1348 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
1349 (MOVHPSrm VR128:$src1, addr:$src2)>;
1350 def : Pat<(X86Movlhps VR128:$src1,
1351 (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
1352 (MOVHPSrm VR128:$src1, addr:$src2)>;
1355 let Predicates = [UseSSE2] in {
1358 // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
1359 // is during lowering, where it's not possible to recognize the load fold
1360 // because it has two uses through a bitcast. One use disappears at isel time
1361 // and the fold opportunity reappears.
1362 def : Pat<(v2f64 (X86Unpckl VR128:$src1,
1363 (scalar_to_vector (loadf64 addr:$src2)))),
1364 (MOVHPDrm VR128:$src1, addr:$src2)>;
1365 // Also handle an i64 load because that may get selected as a faster way to
1367 def : Pat<(v2f64 (X86Unpckl VR128:$src1,
1368 (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
1369 (MOVHPDrm VR128:$src1, addr:$src2)>;
// SSE2 has no VPERMILPD, so lane 1 comes from a SHUFPD (imm 1) instead.
1371 def : Pat<(store (f64 (extractelt
1372 (v2f64 (X86Shufp VR128:$src, VR128:$src, (i8 1))),
1373 (iPTR 0))), addr:$dst),
1374 (MOVHPDmr addr:$dst, VR128:$src)>;
1377 //===----------------------------------------------------------------------===//
1378 // SSE 1 & 2 - Move Low to High and High to Low packed FP Instructions
1379 //===----------------------------------------------------------------------===//
// Register-register MOVLHPS (0x16) / MOVHLPS (0x12): VEX three-operand
// forms under UseAVX, tied two-operand legacy forms otherwise.  The
// (set VR128:$dst, …) wrapper lines are elided from this listing
// (numbering gaps 1384->1386 etc.).
1381 let AddedComplexity = 20, Predicates = [UseAVX] in {
1382 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
1383 (ins VR128:$src1, VR128:$src2),
1384 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1386 (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
1388 VEX_4V, Sched<[WriteFShuffle]>;
1389 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
1390 (ins VR128:$src1, VR128:$src2),
1391 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1393 (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
1395 VEX_4V, Sched<[WriteFShuffle]>;
1397 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
1398 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
1399 (ins VR128:$src1, VR128:$src2),
1400 "movlhps\t{$src2, $dst|$dst, $src2}",
1402 (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
1403 IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
1404 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
1405 (ins VR128:$src1, VR128:$src2),
1406 "movhlps\t{$src2, $dst|$dst, $src2}",
1408 (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
1409 IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
// Integer-typed Movlhps/Movhlps shuffles reuse the FP reg-reg instructions.
1412 let Predicates = [UseAVX] in {
1414 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
1415 (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
1416 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
1417 (VMOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
1420 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
1421 (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
1424 let Predicates = [UseSSE1] in {
1426 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
1427 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
1428 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
1429 (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
1432 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
1433 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
1436 //===----------------------------------------------------------------------===//
1437 // SSE 1 & 2 - Conversion Instructions
1438 //===----------------------------------------------------------------------===//
// Itinerary bundles for the conversion instructions; each pairs a reg-reg
// and a reg-mem itinerary class and sets the scheduling write class via the
// OpndItins Sched field (defaults to WriteFAdd when no `let Sched` applies).
1440 def SSE_CVT_PD : OpndItins<
1441 IIC_SSE_CVT_PD_RR, IIC_SSE_CVT_PD_RM
1444 let Sched = WriteCvtI2F in
1445 def SSE_CVT_PS : OpndItins<
1446 IIC_SSE_CVT_PS_RR, IIC_SSE_CVT_PS_RM
1449 let Sched = WriteCvtI2F in
1450 def SSE_CVT_Scalar : OpndItins<
1451 IIC_SSE_CVT_Scalar_RR, IIC_SSE_CVT_Scalar_RM
1454 let Sched = WriteCvtF2I in
1455 def SSE_CVT_SS2SI_32 : OpndItins<
1456 IIC_SSE_CVT_SS2SI32_RR, IIC_SSE_CVT_SS2SI32_RM
1459 let Sched = WriteCvtF2I in
1460 def SSE_CVT_SS2SI_64 : OpndItins<
1461 IIC_SSE_CVT_SS2SI64_RR, IIC_SSE_CVT_SS2SI64_RM
1464 let Sched = WriteCvtF2I in
1465 def SSE_CVT_SD2SI : OpndItins<
1466 IIC_SSE_CVT_SD2SI_RR, IIC_SSE_CVT_SD2SI_RM
1469 // FIXME: We probably want to match the rm form only when optimizing for
1470 // size, to avoid false dependencies (see sse_fp_unop_s for details)
// Scalar conversion multiclass: rr form applies OpNode to a register, rm
// form folds the load via ld_frag.
1471 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1472 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
1473 string asm, OpndItins itins> {
1474 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
1475 [(set DstRC:$dst, (OpNode SrcRC:$src))],
1476 itins.rr>, Sched<[itins.Sched]>;
1477 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
1478 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))],
1479 itins.rm>, Sched<[itins.Sched.Folded]>;
// Packed conversion multiclass: no patterns (hasSideEffects = 0, matched
// elsewhere); the itins parameter is declared on a line elided from this
// listing (numbering gap 1483->1485).
1482 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1483 X86MemOperand x86memop, string asm, Domain d,
1485 let hasSideEffects = 0 in {
1486 def rr : I<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
1487 [], itins.rr, d>, Sched<[itins.Sched]>;
1489 def rm : I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
1490 [], itins.rm, d>, Sched<[itins.Sched.Folded]>;
1494 // FIXME: We probably want to match the rm form only when optimizing for
1495 // size, to avoid false dependencies (see sse_fp_unop_s for details)
// AVX three-operand scalar int->FP conversion multiclass: pattern-less; the
// extra $src1 operand merges into the destination's upper bits.
1496 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1497 X86MemOperand x86memop, string asm> {
1498 let hasSideEffects = 0, Predicates = [UseAVX] in {
1499 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
1500 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
1501 Sched<[WriteCvtI2F]>;
1503 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1504 (ins DstRC:$src1, x86memop:$src),
1505 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
1506 Sched<[WriteCvtI2FLd, ReadAfterLd]>;
1507 } // hasSideEffects = 0
// AVX truncating FP->int conversions (fp_to_sint) plus the explicit-size
// assembler aliases, and the three-operand int->FP defms.  Some itinerary
// arguments fall on lines elided from this listing (gaps 1512->1515 etc.).
1510 let Predicates = [UseAVX] in {
1511 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
1512 "cvttss2si\t{$src, $dst|$dst, $src}",
1515 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
1516 "cvttss2si\t{$src, $dst|$dst, $src}",
1518 XS, VEX, VEX_W, VEX_LIG;
1519 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
1520 "cvttsd2si\t{$src, $dst|$dst, $src}",
1523 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
1524 "cvttsd2si\t{$src, $dst|$dst, $src}",
1526 XD, VEX, VEX_W, VEX_LIG;
// Aliases so explicit {l}/{q}-suffixed mnemonics assemble; the trailing 0
// suppresses them in disassembly output.
1528 def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
1529 (VCVTTSS2SIrr GR32:$dst, FR32:$src), 0>;
1530 def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
1531 (VCVTTSS2SIrm GR32:$dst, f32mem:$src), 0>;
1532 def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
1533 (VCVTTSD2SIrr GR32:$dst, FR64:$src), 0>;
1534 def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
1535 (VCVTTSD2SIrm GR32:$dst, f64mem:$src), 0>;
1536 def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
1537 (VCVTTSS2SI64rr GR64:$dst, FR32:$src), 0>;
1538 def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
1539 (VCVTTSS2SI64rm GR64:$dst, f32mem:$src), 0>;
1540 def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
1541 (VCVTTSD2SI64rr GR64:$dst, FR64:$src), 0>;
1542 def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
1543 (VCVTTSD2SI64rm GR64:$dst, f64mem:$src), 0>;
1545 // The assembler can recognize rr 64-bit instructions by seeing a rxx
1546 // register, but the same isn't true when only using memory operands,
1547 // provide other assembly "l" and "q" forms to address this explicitly
1548 // where appropriate to do so.
1549 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss{l}">,
1550 XS, VEX_4V, VEX_LIG;
1551 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">,
1552 XS, VEX_4V, VEX_W, VEX_LIG;
1553 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">,
1554 XD, VEX_4V, VEX_LIG;
1555 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">,
1556 XD, VEX_4V, VEX_W, VEX_LIG;
// Under AVX: memory-form aliases for vcvtsi2ss/sd, plus sint_to_fp selection
// patterns.  The pattern-less sse12_vcvt_avx defs tie the upper bits to an
// IMPLICIT_DEF here since codegen has no prior value to merge.
1558 let Predicates = [UseAVX] in {
1559 def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1560 (VCVTSI2SSrm FR64:$dst, FR64:$src1, i32mem:$src), 0>;
1561 def : InstAlias<"vcvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1562 (VCVTSI2SDrm FR64:$dst, FR64:$src1, i32mem:$src), 0>;
1564 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
1565 (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
1566 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
1567 (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
1568 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
1569 (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
1570 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
1571 (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;
1573 def : Pat<(f32 (sint_to_fp GR32:$src)),
1574 (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
1575 def : Pat<(f32 (sint_to_fp GR64:$src)),
1576 (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
1577 def : Pat<(f64 (sint_to_fp GR32:$src)),
1578 (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
1579 def : Pat<(f64 (sint_to_fp GR64:$src)),
1580 (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
// Legacy SSE scalar conversions: truncating fp->int (cvtt*) and int->fp
// (cvtsi2*), with REX_W for the 64-bit GPR variants, followed by the same
// style of {l}/{q} assembler aliases as the AVX block above.
1583 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
1584 "cvttss2si\t{$src, $dst|$dst, $src}",
1585 SSE_CVT_SS2SI_32>, XS;
1586 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
1587 "cvttss2si\t{$src, $dst|$dst, $src}",
1588 SSE_CVT_SS2SI_64>, XS, REX_W;
// NOTE(review): the itinerary argument line of CVTTSD2SI is elided from this
// listing (numbering gap 1590->1592).
1589 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
1590 "cvttsd2si\t{$src, $dst|$dst, $src}",
1592 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
1593 "cvttsd2si\t{$src, $dst|$dst, $src}",
1594 SSE_CVT_SD2SI>, XD, REX_W;
1595 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
1596 "cvtsi2ss{l}\t{$src, $dst|$dst, $src}",
1597 SSE_CVT_Scalar>, XS;
1598 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
1599 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1600 SSE_CVT_Scalar>, XS, REX_W;
1601 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
1602 "cvtsi2sd{l}\t{$src, $dst|$dst, $src}",
1603 SSE_CVT_Scalar>, XD;
1604 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
1605 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1606 SSE_CVT_Scalar>, XD, REX_W;
// Explicit-size mnemonic aliases, suppressed in disassembly (trailing 0).
1608 def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
1609 (CVTTSS2SIrr GR32:$dst, FR32:$src), 0>;
1610 def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
1611 (CVTTSS2SIrm GR32:$dst, f32mem:$src), 0>;
1612 def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
1613 (CVTTSD2SIrr GR32:$dst, FR64:$src), 0>;
1614 def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
1615 (CVTTSD2SIrm GR32:$dst, f64mem:$src), 0>;
1616 def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
1617 (CVTTSS2SI64rr GR64:$dst, FR32:$src), 0>;
1618 def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
1619 (CVTTSS2SI64rm GR64:$dst, f32mem:$src), 0>;
1620 def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1621 (CVTTSD2SI64rr GR64:$dst, FR64:$src), 0>;
1622 def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1623 (CVTTSD2SI64rm GR64:$dst, f64mem:$src), 0>;
1625 def : InstAlias<"cvtsi2ss\t{$src, $dst|$dst, $src}",
1626 (CVTSI2SSrm FR64:$dst, i32mem:$src), 0>;
1627 def : InstAlias<"cvtsi2sd\t{$src, $dst|$dst, $src}",
1628 (CVTSI2SDrm FR64:$dst, i32mem:$src), 0>;
1630 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
1631 // and/or XMM operand(s).
1633 // FIXME: We probably want to match the rm form only when optimizing for
1634 // size, to avoid false dependencies (see sse_fp_unop_s for details)
// sse12_cvt_sint: 2-operand intrinsic convert (reg and folded-load forms).
// mem_cpat is a ComplexPattern (e.g. sse_load_f64) so the intrinsic can
// consume a scalar load directly.
1635 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1636 Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
1637 string asm, OpndItins itins> {
1638 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
1639 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1640 [(set DstRC:$dst, (Int SrcRC:$src))], itins.rr>,
1641 Sched<[itins.Sched]>;
1642 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
1643 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1644 [(set DstRC:$dst, (Int mem_cpat:$src))], itins.rm>,
1645 Sched<[itins.Sched.Folded]>;
// sse12_cvt_sint_3addr: 3-operand (src1 merged) intrinsic convert; the
// asm string is chosen by a flag whose parameter line appears truncated in
// this excerpt (2-op SSE form vs. 3-op AVX form).
1648 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
1649 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
1650 PatFrag ld_frag, string asm, OpndItins itins,
1652 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
1654 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1655 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1656 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))],
1657 itins.rr>, Sched<[itins.Sched]>;
1658 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1659 (ins DstRC:$src1, x86memop:$src2),
1661 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1662 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1663 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))],
1664 itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
// cvtsd2si (rounding fp->int, opcode 0x2D) intrinsic forms, AVX then legacy,
// followed by the isCodeGenOnly Int_* cvtsi2ss/sd 3-address intrinsic forms.
1667 let Predicates = [UseAVX] in {
1668 defm VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32,
1669 int_x86_sse2_cvtsd2si, sdmem, sse_load_f64, "cvtsd2si",
1670 SSE_CVT_SD2SI>, XD, VEX, VEX_LIG;
1671 defm VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
1672 int_x86_sse2_cvtsd2si64, sdmem, sse_load_f64, "cvtsd2si",
1673 SSE_CVT_SD2SI>, XD, VEX, VEX_W, VEX_LIG;
1675 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
1676 sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD;
1677 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
1678 sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD, REX_W;
// Intrinsic-only (isCodeGenOnly) forms: not emitted by the assembler, only
// selected for the *.cvtsi2ss/sd intrinsics.
1681 let isCodeGenOnly = 1 in {
1682 let Predicates = [UseAVX] in {
1683 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1684 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
1685 SSE_CVT_Scalar, 0>, XS, VEX_4V;
1686 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1687 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
1688 SSE_CVT_Scalar, 0>, XS, VEX_4V,
1690 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1691 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
1692 SSE_CVT_Scalar, 0>, XD, VEX_4V;
1693 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1694 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
1695 SSE_CVT_Scalar, 0>, XD,
// Legacy SSE forms tie src1 to dst (2-address encoding).
1698 let Constraints = "$src1 = $dst" in {
1699 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1700 int_x86_sse_cvtsi2ss, i32mem, loadi32,
1701 "cvtsi2ss{l}", SSE_CVT_Scalar>, XS;
1702 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1703 int_x86_sse_cvtsi642ss, i64mem, loadi64,
1704 "cvtsi2ss{q}", SSE_CVT_Scalar>, XS, REX_W;
1705 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1706 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
1707 "cvtsi2sd{l}", SSE_CVT_Scalar>, XD;
1708 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1709 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
1710 "cvtsi2sd{q}", SSE_CVT_Scalar>, XD, REX_W;
1712 } // isCodeGenOnly = 1
1716 // Aliases for intrinsics
// Truncating (0x2C) and rounding (0x2D) fp->int forms that take an XMM
// source, matched only for the corresponding intrinsics (isCodeGenOnly).
1717 let isCodeGenOnly = 1 in {
1718 let Predicates = [UseAVX] in {
1719 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1720 ssmem, sse_load_f32, "cvttss2si",
1721 SSE_CVT_SS2SI_32>, XS, VEX;
1722 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1723 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
1724 "cvttss2si", SSE_CVT_SS2SI_64>,
1726 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
1727 sdmem, sse_load_f64, "cvttsd2si",
1728 SSE_CVT_SD2SI>, XD, VEX;
1729 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1730 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
1731 "cvttsd2si", SSE_CVT_SD2SI>,
1734 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1735 ssmem, sse_load_f32, "cvttss2si",
1736 SSE_CVT_SS2SI_32>, XS;
1737 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1738 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
1739 "cvttss2si", SSE_CVT_SS2SI_64>, XS, REX_W;
1740 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
1741 sdmem, sse_load_f64, "cvttsd2si",
1743 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1744 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
1745 "cvttsd2si", SSE_CVT_SD2SI>, XD, REX_W;
1746 } // isCodeGenOnly = 1
1748 let Predicates = [UseAVX] in {
1749 defm VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
1750 ssmem, sse_load_f32, "cvtss2si",
1751 SSE_CVT_SS2SI_32>, XS, VEX, VEX_LIG;
1752 defm VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
1753 ssmem, sse_load_f32, "cvtss2si",
1754 SSE_CVT_SS2SI_64>, XS, VEX, VEX_W, VEX_LIG;
1756 defm CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
1757 ssmem, sse_load_f32, "cvtss2si",
1758 SSE_CVT_SS2SI_32>, XS;
1759 defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
1760 ssmem, sse_load_f32, "cvtss2si",
1761 SSE_CVT_SS2SI_64>, XS, REX_W;
// Packed int->float converts (cvtdq2ps, opcode 0x5B) and assembler aliases
// for the scalar fp->int mnemonics without an operand-size suffix.
1763 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
1764 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1765 SSEPackedSingle, SSE_CVT_PS>,
1766 PS, VEX, Requires<[HasAVX]>;
1767 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, i256mem,
1768 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1769 SSEPackedSingle, SSE_CVT_PS>,
1770 PS, VEX, VEX_L, Requires<[HasAVX]>;
1772 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
1773 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1774 SSEPackedSingle, SSE_CVT_PS>,
1775 PS, Requires<[UseSSE2]>;
1777 let Predicates = [UseAVX] in {
1778 def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
1779 (VCVTSS2SIrr GR32:$dst, VR128:$src), 0>;
1780 def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
1781 (VCVTSS2SIrm GR32:$dst, ssmem:$src), 0>;
1782 def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
1783 (VCVTSD2SIrr GR32:$dst, VR128:$src), 0>;
1784 def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
1785 (VCVTSD2SIrm GR32:$dst, sdmem:$src), 0>;
1786 def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
1787 (VCVTSS2SI64rr GR64:$dst, VR128:$src), 0>;
1788 def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
1789 (VCVTSS2SI64rm GR64:$dst, ssmem:$src), 0>;
1790 def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
1791 (VCVTSD2SI64rr GR64:$dst, VR128:$src), 0>;
1792 def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
1793 (VCVTSD2SI64rm GR64:$dst, sdmem:$src), 0>;
// Legacy (non-VEX) counterparts of the aliases above.
1796 def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
1797 (CVTSS2SIrr GR32:$dst, VR128:$src), 0>;
1798 def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
1799 (CVTSS2SIrm GR32:$dst, ssmem:$src), 0>;
1800 def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
1801 (CVTSD2SIrr GR32:$dst, VR128:$src), 0>;
1802 def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
1803 (CVTSD2SIrm GR32:$dst, sdmem:$src), 0>;
1804 def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
1805 (CVTSS2SI64rr GR64:$dst, VR128:$src), 0>;
1806 def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
1807 (CVTSS2SI64rm GR64:$dst, ssmem:$src), 0>;
1808 def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1809 (CVTSD2SI64rr GR64:$dst, VR128:$src), 0>;
1810 def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1811 (CVTSD2SI64rm GR64:$dst, sdmem:$src), 0>;
1815 // Convert scalar double to scalar single
// AVX forms carry a pass-through $src1; the rm form is restricted to
// OptForSize because folding the load creates a partial-register merge.
1816 let hasSideEffects = 0, Predicates = [UseAVX] in {
1817 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
1818 (ins FR64:$src1, FR64:$src2),
1819 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
1820 IIC_SSE_CVT_Scalar_RR>, VEX_4V, VEX_LIG,
1821 Sched<[WriteCvtF2F]>;
1823 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
1824 (ins FR64:$src1, f64mem:$src2),
1825 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1826 [], IIC_SSE_CVT_Scalar_RM>,
1827 XD, Requires<[HasAVX, OptForSize]>, VEX_4V, VEX_LIG,
1828 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
// fround pattern for the VEX form; the Requires<> continuation appears
// truncated in this excerpt.
1831 def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
1834 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
1835 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1836 [(set FR32:$dst, (fround FR64:$src))],
1837 IIC_SSE_CVT_Scalar_RR>, Sched<[WriteCvtF2F]>;
1838 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
1839 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1840 [(set FR32:$dst, (fround (loadf64 addr:$src)))],
1841 IIC_SSE_CVT_Scalar_RM>,
1843 Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
// Intrinsic (whole-XMM) cvtsd2ss forms; selected only for
// int_x86_sse2_cvtsd2ss, never printed/parsed (isCodeGenOnly).
1845 let isCodeGenOnly = 1 in {
1846 def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg,
1847 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1848 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1850 (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
1851 IIC_SSE_CVT_Scalar_RR>, XD, VEX_4V, Requires<[HasAVX]>,
1852 Sched<[WriteCvtF2F]>;
// Folded-load intrinsic form. Fixed: this takes a memory operand (sdmem),
// so it must be encoded MRMSrcMem, not MRMSrcReg — the register form would
// emit a wrong ModRM byte. Matches Int_VCVTSS2SDrm / Int_CVTSS2SDrm below.
1853 def Int_VCVTSD2SSrm: I<0x5A, MRMSrcMem,
1854 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1855 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1856 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1857 VR128:$src1, sse_load_f64:$src2))],
1858 IIC_SSE_CVT_Scalar_RM>, XD, VEX_4V, Requires<[HasAVX]>,
1859 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
// Legacy SSE intrinsic form: 2-address, src1 tied to dst.
1861 let Constraints = "$src1 = $dst" in {
1862 def Int_CVTSD2SSrr: I<0x5A, MRMSrcReg,
1863 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1864 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1866 (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
1867 IIC_SSE_CVT_Scalar_RR>, XD, Requires<[UseSSE2]>,
1868 Sched<[WriteCvtF2F]>;
// Folded-load intrinsic form (legacy SSE, src1 tied to dst). Fixed: memory
// operand requires MRMSrcMem encoding, not MRMSrcReg — consistent with
// Int_CVTSS2SDrm below and with the rm forms produced by sse12_cvt_sint.
1869 def Int_CVTSD2SSrm: I<0x5A, MRMSrcMem,
1870 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1871 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1872 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1873 VR128:$src1, sse_load_f64:$src2))],
1874 IIC_SSE_CVT_Scalar_RM>, XD, Requires<[UseSSE2]>,
1875 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1877 } // isCodeGenOnly = 1
1879 // Convert scalar single to scalar double
1880 // SSE2 instructions with XS prefix
// AVX forms take a merge source $src1; rm form is OptForSize-gated, and an
// OptForSpeed pattern below goes through VMOVSSrm to avoid a false
// dependency on the destination register.
1881 let hasSideEffects = 0, Predicates = [UseAVX] in {
1882 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
1883 (ins FR32:$src1, FR32:$src2),
1884 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1885 [], IIC_SSE_CVT_Scalar_RR>,
1886 XS, Requires<[HasAVX]>, VEX_4V, VEX_LIG,
1887 Sched<[WriteCvtF2F]>;
1889 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
1890 (ins FR32:$src1, f32mem:$src2),
1891 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1892 [], IIC_SSE_CVT_Scalar_RM>,
1893 XS, VEX_4V, VEX_LIG, Requires<[HasAVX, OptForSize]>,
1894 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1897 def : Pat<(f64 (fextend FR32:$src)),
1898 (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[UseAVX]>;
1899 def : Pat<(fextend (loadf32 addr:$src)),
1900 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[UseAVX]>;
1902 def : Pat<(extloadf32 addr:$src),
1903 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>,
1904 Requires<[UseAVX, OptForSize]>;
1905 def : Pat<(extloadf32 addr:$src),
1906 (VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (VMOVSSrm addr:$src))>,
1907 Requires<[UseAVX, OptForSpeed]>;
1909 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
1910 "cvtss2sd\t{$src, $dst|$dst, $src}",
1911 [(set FR64:$dst, (fextend FR32:$src))],
1912 IIC_SSE_CVT_Scalar_RR>, XS,
1913 Requires<[UseSSE2]>, Sched<[WriteCvtF2F]>;
1914 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
1915 "cvtss2sd\t{$src, $dst|$dst, $src}",
1916 [(set FR64:$dst, (extloadf32 addr:$src))],
1917 IIC_SSE_CVT_Scalar_RM>, XS,
1918 Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
1920 // extload f32 -> f64. This matches load+fextend because we have a hack in
1921 // the isel (PreprocessForFPConvert) that can introduce loads after dag
1923 // Since these loads aren't folded into the fextend, we have to match it
1925 def : Pat<(fextend (loadf32 addr:$src)),
1926 (CVTSS2SDrm addr:$src)>, Requires<[UseSSE2]>;
1927 def : Pat<(extloadf32 addr:$src),
1928 (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[UseSSE2, OptForSpeed]>;
// Intrinsic (whole-XMM) cvtss2sd forms, AVX 3-operand then legacy 2-address.
// Note the rm defs correctly use MRMSrcMem for their memory operands.
1930 let isCodeGenOnly = 1 in {
1931 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
1932 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1933 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1935 (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
1936 IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, Requires<[HasAVX]>,
1937 Sched<[WriteCvtF2F]>;
1938 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
1939 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1940 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1942 (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
1943 IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, Requires<[HasAVX]>,
1944 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1945 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
1946 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
1947 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1948 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1950 (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
1951 IIC_SSE_CVT_Scalar_RR>, XS, Requires<[UseSSE2]>,
1952 Sched<[WriteCvtF2F]>;
1953 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
1954 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1955 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1957 (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
1958 IIC_SSE_CVT_Scalar_RM>, XS, Requires<[UseSSE2]>,
1959 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1961 } // isCodeGenOnly = 1
1963 // Convert packed single/double fp to doubleword
// cvtps2dq (0x5B, 66-prefix via VPDI/PDI): rounding packed f32->i32,
// 128-bit and 256-bit VEX forms plus the legacy SSE2 form.
1964 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1965 "cvtps2dq\t{$src, $dst|$dst, $src}",
1966 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
1967 IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
1968 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1969 "cvtps2dq\t{$src, $dst|$dst, $src}",
1971 (int_x86_sse2_cvtps2dq (loadv4f32 addr:$src)))],
1972 IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
1973 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1974 "cvtps2dq\t{$src, $dst|$dst, $src}",
1976 (int_x86_avx_cvt_ps2dq_256 VR256:$src))],
1977 IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
1978 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1979 "cvtps2dq\t{$src, $dst|$dst, $src}",
1981 (int_x86_avx_cvt_ps2dq_256 (loadv8f32 addr:$src)))],
1982 IIC_SSE_CVT_PS_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
// Legacy forms use memop (alignment-checked load) rather than loadv4f32.
1983 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1984 "cvtps2dq\t{$src, $dst|$dst, $src}",
1985 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
1986 IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
1987 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1988 "cvtps2dq\t{$src, $dst|$dst, $src}",
1990 (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))],
1991 IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
1994 // Convert Packed Double FP to Packed DW Integers
1995 let Predicates = [HasAVX] in {
1996 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1997 // register, but the same isn't true when using memory operands instead.
1998 // Provide other assembly rr and rm forms to address this explicitly.
1999 def VCVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2000 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
2001 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
2002 VEX, Sched<[WriteCvtF2I]>;
// "x"/"y" suffixed spellings disambiguate 128- vs 256-bit memory sources.
2005 def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
2006 (VCVTPD2DQrr VR128:$dst, VR128:$src), 0>;
2007 def VCVTPD2DQXrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2008 "vcvtpd2dqx\t{$src, $dst|$dst, $src}",
2010 (int_x86_sse2_cvtpd2dq (loadv2f64 addr:$src)))]>, VEX,
2011 Sched<[WriteCvtF2ILd]>;
2014 def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2015 "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
2017 (int_x86_avx_cvt_pd2dq_256 VR256:$src))]>, VEX, VEX_L,
2018 Sched<[WriteCvtF2I]>;
2019 def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2020 "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
2022 (int_x86_avx_cvt_pd2dq_256 (loadv4f64 addr:$src)))]>,
2023 VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
2024 def : InstAlias<"vcvtpd2dq\t{$src, $dst|$dst, $src}",
2025 (VCVTPD2DQYrr VR128:$dst, VR256:$src), 0>;
// Legacy SSE2 forms.
2028 def CVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2029 "cvtpd2dq\t{$src, $dst|$dst, $src}",
2031 (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))],
2032 IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2ILd]>;
2033 def CVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2034 "cvtpd2dq\t{$src, $dst|$dst, $src}",
2035 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))],
2036 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>;
2038 // Convert with truncation packed single/double fp to doubleword
2039 // SSE2 packed instructions with XS prefix
2040 def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2041 "cvttps2dq\t{$src, $dst|$dst, $src}",
2043 (int_x86_sse2_cvttps2dq VR128:$src))],
2044 IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
2045 def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2046 "cvttps2dq\t{$src, $dst|$dst, $src}",
2047 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
2048 (loadv4f32 addr:$src)))],
2049 IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
2050 def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2051 "cvttps2dq\t{$src, $dst|$dst, $src}",
2053 (int_x86_avx_cvtt_ps2dq_256 VR256:$src))],
2054 IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
2055 def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2056 "cvttps2dq\t{$src, $dst|$dst, $src}",
2057 [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256
2058 (loadv8f32 addr:$src)))],
2059 IIC_SSE_CVT_PS_RM>, VEX, VEX_L,
2060 Sched<[WriteCvtF2ILd]>;
// Legacy SSE2 truncating forms.
2062 def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2063 "cvttps2dq\t{$src, $dst|$dst, $src}",
2064 [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))],
2065 IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
2066 def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2067 "cvttps2dq\t{$src, $dst|$dst, $src}",
2069 (int_x86_sse2_cvttps2dq (memopv4f32 addr:$src)))],
2070 IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
// Isel patterns: map generic sint_to_fp/fp_to_sint DAG nodes and the
// cvtdq2ps intrinsic onto the (V)CVTDQ2PS / (V)CVTTPS2DQ instructions.
// NoVLX keeps these from competing with the AVX-512VL forms.
2072 let Predicates = [HasAVX] in {
2073 def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
2074 (VCVTDQ2PSrr VR128:$src)>;
2075 def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (loadv2i64 addr:$src))),
2076 (VCVTDQ2PSrm addr:$src)>;
2079 let Predicates = [HasAVX, NoVLX] in {
2080 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
2081 (VCVTDQ2PSrr VR128:$src)>;
2082 def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))),
2083 (VCVTDQ2PSrm addr:$src)>;
2085 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
2086 (VCVTTPS2DQrr VR128:$src)>;
2087 def : Pat<(v4i32 (fp_to_sint (loadv4f32 addr:$src))),
2088 (VCVTTPS2DQrm addr:$src)>;
2090 def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
2091 (VCVTDQ2PSYrr VR256:$src)>;
2092 def : Pat<(v8f32 (sint_to_fp (bc_v8i32 (loadv4i64 addr:$src)))),
2093 (VCVTDQ2PSYrm addr:$src)>;
2095 def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
2096 (VCVTTPS2DQYrr VR256:$src)>;
2097 def : Pat<(v8i32 (fp_to_sint (loadv8f32 addr:$src))),
2098 (VCVTTPS2DQYrm addr:$src)>;
// SSE2 versions use memop loads (alignment-checked).
2101 let Predicates = [UseSSE2] in {
2102 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
2103 (CVTDQ2PSrr VR128:$src)>;
2104 def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
2105 (CVTDQ2PSrm addr:$src)>;
2107 def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
2108 (CVTDQ2PSrr VR128:$src)>;
2109 def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (memopv2i64 addr:$src))),
2110 (CVTDQ2PSrm addr:$src)>;
2112 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
2113 (CVTTPS2DQrr VR128:$src)>;
2114 def : Pat<(v4i32 (fp_to_sint (memopv4f32 addr:$src))),
2115 (CVTTPS2DQrm addr:$src)>;
// Truncating packed f64->i32 (cvttpd2dq, 0xE6 / 66 prefix), AVX forms.
2118 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2119 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2121 (int_x86_sse2_cvttpd2dq VR128:$src))],
2122 IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2I]>;
2124 // The assembler can recognize rr 256-bit instructions by seeing a ymm
2125 // register, but the same isn't true when using memory operands instead.
2126 // Provide other assembly rr and rm forms to address this explicitly.
2129 def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
2130 (VCVTTPD2DQrr VR128:$dst, VR128:$src), 0>;
2131 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2132 "cvttpd2dqx\t{$src, $dst|$dst, $src}",
2133 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
2134 (loadv2f64 addr:$src)))],
2135 IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2ILd]>;
// 256-bit source forms narrow to a 128-bit integer result.
2138 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2139 "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
2141 (int_x86_avx_cvtt_pd2dq_256 VR256:$src))],
2142 IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
2143 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2144 "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
2146 (int_x86_avx_cvtt_pd2dq_256 (loadv4f64 addr:$src)))],
2147 IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
2148 def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}",
2149 (VCVTTPD2DQYrr VR128:$dst, VR256:$src), 0>;
// fp_to_sint patterns for the 256-bit truncating convert.
2151 let Predicates = [HasAVX, NoVLX] in {
2152 def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
2153 (VCVTTPD2DQYrr VR256:$src)>;
2154 def : Pat<(v4i32 (fp_to_sint (loadv4f64 addr:$src))),
2155 (VCVTTPD2DQYrm addr:$src)>;
2156 } // Predicates = [HasAVX, NoVLX]
// Legacy SSE2 truncating packed f64->i32.
2158 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2159 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2160 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))],
2161 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>;
2162 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
2163 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2164 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
2165 (memopv2f64 addr:$src)))],
// NOTE(review): the itinerary line (expected IIC_SSE_CVT_PD_RM) appears to
// be missing from this excerpt — verify against upstream source.
2167 Sched<[WriteCvtF2ILd]>;
2169 // Convert packed single to packed double
2170 let Predicates = [HasAVX] in {
2171 // SSE2 instructions without OpSize prefix
// 128-bit rm form widens two f32 from a 64-bit load; 256-bit form widens
// four f32 from a 128-bit source.
2172 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2173 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2174 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
2175 IIC_SSE_CVT_PD_RR>, PS, VEX, Sched<[WriteCvtF2F]>;
2176 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2177 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2178 [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
2179 IIC_SSE_CVT_PD_RM>, PS, VEX, Sched<[WriteCvtF2FLd]>;
2180 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
2181 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2183 (int_x86_avx_cvt_ps2_pd_256 VR128:$src))],
2184 IIC_SSE_CVT_PD_RR>, PS, VEX, VEX_L, Sched<[WriteCvtF2F]>;
2185 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
2186 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2188 (int_x86_avx_cvt_ps2_pd_256 (loadv4f32 addr:$src)))],
2189 IIC_SSE_CVT_PD_RM>, PS, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
2192 let Predicates = [UseSSE2] in {
2193 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2194 "cvtps2pd\t{$src, $dst|$dst, $src}",
2195 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
2196 IIC_SSE_CVT_PD_RR>, PS, Sched<[WriteCvtF2F]>;
2197 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2198 "cvtps2pd\t{$src, $dst|$dst, $src}",
2199 [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
2200 IIC_SSE_CVT_PD_RM>, PS, Sched<[WriteCvtF2FLd]>;
2203 // Convert Packed DW Integers to Packed Double FP
2204 let Predicates = [HasAVX] in {
// rm form has no pattern (matched via the Pat<>s further down); it loads
// only 64 bits, hence mayLoad with hasSideEffects = 0.
2205 let hasSideEffects = 0, mayLoad = 1 in
2206 def VCVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2207 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2208 []>, VEX, Sched<[WriteCvtI2FLd]>;
2209 def VCVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2210 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2212 (int_x86_sse2_cvtdq2pd VR128:$src))]>, VEX,
2213 Sched<[WriteCvtI2F]>;
2214 def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
2215 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2217 (int_x86_avx_cvtdq2_pd_256
2218 (bitconvert (loadv2i64 addr:$src))))]>, VEX, VEX_L,
2219 Sched<[WriteCvtI2FLd]>;
2220 def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
2221 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2223 (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX, VEX_L,
2224 Sched<[WriteCvtI2F]>;
// Legacy SSE2 cvtdq2pd, 64-bit memory source (no pattern; matched below).
// Fixed: memory form uses the RM itinerary — it previously carried
// IIC_SSE_CVT_PD_RR, which was swapped with the rr def that follows
// (the Sched classes, WriteCvtI2FLd here vs WriteCvtI2F, show the intent).
2227 let hasSideEffects = 0, mayLoad = 1 in
2228 def CVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2229 "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
2230 IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtI2FLd]>;
// Legacy SSE2 cvtdq2pd, register source.
// Fixed: register form uses the RR itinerary — it previously carried
// IIC_SSE_CVT_PD_RM (swapped with the rm def above); Sched<[WriteCvtI2F]>
// confirms this is the non-load form.
2231 def CVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2232 "cvtdq2pd\t{$src, $dst|$dst, $src}",
2233 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))],
2234 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtI2F]>;
2236 // AVX register conversion intrinsics
// Patterns selecting (V)CVTDQ2PD for the X86cvtdq2pd node and 256-bit
// sint_to_fp.
2237 let Predicates = [HasAVX] in {
2238 def : Pat<(v2f64 (X86cvtdq2pd (v4i32 VR128:$src))),
2239 (VCVTDQ2PDrr VR128:$src)>;
2240 def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (loadv2i64 addr:$src)))),
2241 (VCVTDQ2PDrm addr:$src)>;
2243 def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
2244 (VCVTDQ2PDYrr VR128:$src)>;
2245 def : Pat<(v4f64 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))),
2246 (VCVTDQ2PDYrm addr:$src)>;
2247 } // Predicates = [HasAVX]
2249 // SSE2 register conversion intrinsics
2250 let Predicates = [HasSSE2] in {
2251 def : Pat<(v2f64 (X86cvtdq2pd (v4i32 VR128:$src))),
2252 (CVTDQ2PDrr VR128:$src)>;
2253 def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (loadv2i64 addr:$src)))),
2254 (CVTDQ2PDrm addr:$src)>;
2255 } // Predicates = [HasSSE2]
2257 // Convert packed double to packed single
2258 // The assembler can recognize rr 256-bit instructions by seeing a ymm
2259 // register, but the same isn't true when using memory operands instead.
2260 // Provide other assembly rr and rm forms to address this explicitly.
2261 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2262 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2263 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
2264 IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2F]>;
// "x"/"y" suffixes disambiguate 128- vs 256-bit memory sources in asm.
2267 def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
2268 (VCVTPD2PSrr VR128:$dst, VR128:$src), 0>;
2269 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2270 "cvtpd2psx\t{$src, $dst|$dst, $src}",
2272 (int_x86_sse2_cvtpd2ps (loadv2f64 addr:$src)))],
2273 IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2FLd]>;
2276 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2277 "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
2279 (int_x86_avx_cvt_pd2_ps_256 VR256:$src))],
2280 IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2F]>;
2281 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2282 "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
2284 (int_x86_avx_cvt_pd2_ps_256 (loadv4f64 addr:$src)))],
2285 IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
2286 def : InstAlias<"vcvtpd2ps\t{$src, $dst|$dst, $src}",
2287 (VCVTPD2PSYrr VR128:$dst, VR256:$src), 0>;
// Legacy SSE2 forms.
2289 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2290 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2291 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
2292 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2F]>;
2293 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2294 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2296 (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))],
2297 IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2FLd]>;
2300 // AVX 256-bit register conversion intrinsics
2301 // FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
2302 // whenever possible to avoid declaring two versions of each one.
2303 let Predicates = [HasAVX] in {
2304 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
2305 (VCVTDQ2PSYrr VR256:$src)>;
2306 def : Pat<(int_x86_avx_cvtdq2_ps_256 (bitconvert (loadv4i64 addr:$src))),
2307 (VCVTDQ2PSYrm addr:$src)>;
2310 let Predicates = [HasAVX, NoVLX] in {
2311 // Match fround and fextend for 128/256-bit conversions
2312 def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
2313 (VCVTPD2PSrr VR128:$src)>;
2314 def : Pat<(v4f32 (X86vfpround (loadv2f64 addr:$src))),
2315 (VCVTPD2PSXrm addr:$src)>;
2316 def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
2317 (VCVTPD2PSYrr VR256:$src)>;
2318 def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
2319 (VCVTPD2PSYrm addr:$src)>;
2321 def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
2322 (VCVTPS2PDrr VR128:$src)>;
2323 def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
2324 (VCVTPS2PDYrr VR128:$src)>;
2325 def : Pat<(v4f64 (extloadv4f32 addr:$src)),
2326 (VCVTPS2PDYrm addr:$src)>;
2329 let Predicates = [UseSSE2] in {
2330 // Match fround and fextend for 128 conversions
2331 def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
2332 (CVTPD2PSrr VR128:$src)>;
2333 def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))),
2334 (CVTPD2PSrm addr:$src)>;
2336 def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
2337 (CVTPS2PDrr VR128:$src)>;
2340 //===----------------------------------------------------------------------===//
2341 // SSE 1 & 2 - Compare Instructions
2342 //===----------------------------------------------------------------------===//
2344 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
// Emits cmp{ss,sd} (opcode 0xC2). CC is a pseudo-operand printed as a
// mnemonic suffix (cmpeqss etc.); the *_alt defs accept a raw 8-bit
// immediate comparison code for the assembler only.
2345 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
2346 Operand CC, SDNode OpNode, ValueType VT,
2347 PatFrag ld_frag, string asm, string asm_alt,
2348 OpndItins itins, ImmLeaf immLeaf> {
2349 def rr : SIi8<0xC2, MRMSrcReg,
2350 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
2351 [(set RC:$dst, (OpNode (VT RC:$src1), RC:$src2, immLeaf:$cc))],
2352 itins.rr>, Sched<[itins.Sched]>;
2353 def rm : SIi8<0xC2, MRMSrcMem,
2354 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
2355 [(set RC:$dst, (OpNode (VT RC:$src1),
2356 (ld_frag addr:$src2), immLeaf:$cc))],
2358 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2360 // Accept explicit immediate argument form instead of comparison code.
2361 let isAsmParserOnly = 1, hasSideEffects = 0 in {
2362 def rr_alt : SIi8<0xC2, MRMSrcReg, (outs RC:$dst),
2363 (ins RC:$src1, RC:$src2, u8imm:$cc), asm_alt, [],
2364 IIC_SSE_ALU_F32S_RR>, Sched<[itins.Sched]>;
2366 def rm_alt : SIi8<0xC2, MRMSrcMem, (outs RC:$dst),
2367 (ins RC:$src1, x86memop:$src2, u8imm:$cc), asm_alt, [],
2368 IIC_SSE_ALU_F32S_RM>,
2369 Sched<[itins.Sched.Folded, ReadAfterLd]>;
// VEX-encoded scalar compares: 3-operand, immediate range 0-31 (i8immZExt5).
2373 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, AVXCC, X86cmps, f32, loadf32,
2374 "cmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2375 "cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2376 SSE_ALU_F32S, i8immZExt5>, XS, VEX_4V, VEX_LIG;
2377 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, AVXCC, X86cmps, f64, loadf64,
2378 "cmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2379 "cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2380 SSE_ALU_F32S, i8immZExt5>, // same latency as 32 bit compare
2381 XD, VEX_4V, VEX_LIG;
// Legacy-SSE scalar compares: 2-operand (dst tied to src1), immediate range
// 0-7 (i8immZExt3) since legacy encodings only define 8 predicates.
2383 let Constraints = "$src1 = $dst" in {
2384 defm CMPSS : sse12_cmp_scalar<FR32, f32mem, SSECC, X86cmps, f32, loadf32,
2385 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
2386 "cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}", SSE_ALU_F32S,
2388 defm CMPSD : sse12_cmp_scalar<FR64, f64mem, SSECC, X86cmps, f64, loadf64,
2389 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
2390 "cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2391 SSE_ALU_F64S, i8immZExt3>, XD;
// sse12_cmp_scalar_int - intrinsic (whole-XMM) variants of the scalar
// compares; these operate on VR128 and match the cmp_ss/cmp_sd intrinsics
// directly instead of a generic SDNode.
2394 multiclass sse12_cmp_scalar_int<X86MemOperand x86memop, Operand CC,
2395 Intrinsic Int, string asm, OpndItins itins,
2397 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
2398 (ins VR128:$src1, VR128:$src, CC:$cc), asm,
2399 [(set VR128:$dst, (Int VR128:$src1,
2400 VR128:$src, immLeaf:$cc))],
2402 Sched<[itins.Sched]>;
2403 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
2404 (ins VR128:$src1, x86memop:$src, CC:$cc), asm,
2405 [(set VR128:$dst, (Int VR128:$src1,
2406 (load addr:$src), immLeaf:$cc))],
2408 Sched<[itins.Sched.Folded, ReadAfterLd]>;
// Intrinsic forms are codegen-only: the asm strings duplicate the normal
// instructions, so they must not be visible to the assembler/disassembler.
2411 let isCodeGenOnly = 1 in {
2412 // Aliases to match intrinsics which expect XMM operand(s).
2413 defm Int_VCMPSS : sse12_cmp_scalar_int<f32mem, AVXCC, int_x86_sse_cmp_ss,
2414 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
2415 SSE_ALU_F32S, i8immZExt5>,
2417 defm Int_VCMPSD : sse12_cmp_scalar_int<f64mem, AVXCC, int_x86_sse2_cmp_sd,
2418 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
2419 SSE_ALU_F32S, i8immZExt5>, // same latency as f32
// Legacy encodings: tied operands, 3-bit immediate (predicates 0-7 only).
2421 let Constraints = "$src1 = $dst" in {
2422 defm Int_CMPSS : sse12_cmp_scalar_int<f32mem, SSECC, int_x86_sse_cmp_ss,
2423 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
2424 SSE_ALU_F32S, i8immZExt3>, XS;
2425 defm Int_CMPSD : sse12_cmp_scalar_int<f64mem, SSECC, int_x86_sse2_cmp_sd,
2426 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
2427 SSE_ALU_F64S, i8immZExt3>,
2433 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
// No register results: both forms have empty (outs) and only define EFLAGS
// via the pattern (the enclosing 'let Defs = [EFLAGS]' supplies the def).
2434 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
2435 ValueType vt, X86MemOperand x86memop,
2436 PatFrag ld_frag, string OpcodeStr> {
2437 def rr: SI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
2438 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2439 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))],
2442 def rm: SI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
2443 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2444 [(set EFLAGS, (OpNode (vt RC:$src1),
2445 (ld_frag addr:$src2)))],
2447 Sched<[WriteFAddLd, ReadAfterLd]>;
// COMIS/UCOMIS instantiations. UCOMI (0x2E) is the quiet compare, COMI
// (0x2F) the signaling one; COMI variants carry no pattern (Pattern = [])
// and are selected only via the Int_* intrinsic forms below.
2450 let Defs = [EFLAGS] in {
2451 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
2452 "ucomiss">, PS, VEX, VEX_LIG;
2453 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
2454 "ucomisd">, PD, VEX, VEX_LIG;
2455 let Pattern = []<dag> in {
2456 defm VCOMISS : sse12_ord_cmp<0x2F, FR32, undef, f32, f32mem, loadf32,
2457 "comiss">, PS, VEX, VEX_LIG;
2458 defm VCOMISD : sse12_ord_cmp<0x2F, FR64, undef, f64, f64mem, loadf64,
2459 "comisd">, PD, VEX, VEX_LIG;
// Whole-XMM (intrinsic) variants; codegen-only to avoid duplicate asm.
2462 let isCodeGenOnly = 1 in {
2463 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
2464 load, "ucomiss">, PS, VEX;
2465 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
2466 load, "ucomisd">, PD, VEX;
2468 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
2469 load, "comiss">, PS, VEX;
2470 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
2471 load, "comisd">, PD, VEX;
// Legacy-SSE encodings of the same instructions.
2473 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
2475 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
2478 let Pattern = []<dag> in {
2479 defm COMISS : sse12_ord_cmp<0x2F, FR32, undef, f32, f32mem, loadf32,
2481 defm COMISD : sse12_ord_cmp<0x2F, FR64, undef, f64, f64mem, loadf64,
2485 let isCodeGenOnly = 1 in {
2486 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
2487 load, "ucomiss">, PS;
2488 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
2489 load, "ucomisd">, PD;
2491 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
2493 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
2496 } // Defs = [EFLAGS]
2498 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
// Packed analogue of sse12_cmp_scalar: rri/rmi forms matching a compare
// intrinsic, plus asm-parser-only "_alt" forms taking a raw u8 immediate.
2499 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
2500 Operand CC, Intrinsic Int, string asm,
2501 string asm_alt, Domain d, ImmLeaf immLeaf,
2502 PatFrag ld_frag, OpndItins itins = SSE_ALU_F32P> {
// Register-register compare is commutable (swapping operands with a
// suitably adjusted predicate immediate is legal for the matcher).
2503 let isCommutable = 1 in
2504 def rri : PIi8<0xC2, MRMSrcReg,
2505 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
2506 [(set RC:$dst, (Int RC:$src1, RC:$src2, immLeaf:$cc))],
2509 def rmi : PIi8<0xC2, MRMSrcMem,
2510 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
2511 [(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2), immLeaf:$cc))],
2513 Sched<[WriteFAddLd, ReadAfterLd]>;
2515 // Accept explicit immediate argument form instead of comparison code.
2516 let isAsmParserOnly = 1, hasSideEffects = 0 in {
2517 def rri_alt : PIi8<0xC2, MRMSrcReg,
2518 (outs RC:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc),
2519 asm_alt, [], itins.rr, d>, Sched<[WriteFAdd]>;
2521 def rmi_alt : PIi8<0xC2, MRMSrcMem,
2522 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc),
2523 asm_alt, [], itins.rm, d>,
2524 Sched<[WriteFAddLd, ReadAfterLd]>;
// VEX-encoded packed compares (128- and 256-bit), immediate range 0-31.
2528 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse_cmp_ps,
2529 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2530 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2531 SSEPackedSingle, i8immZExt5, loadv4f32>, PS, VEX_4V;
2532 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse2_cmp_pd,
2533 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2534 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2535 SSEPackedDouble, i8immZExt5, loadv2f64>, PD, VEX_4V;
2536 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_ps_256,
2537 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2538 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2539 SSEPackedSingle, i8immZExt5, loadv8f32>, PS, VEX_4V, VEX_L;
2540 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_pd_256,
2541 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2542 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2543 SSEPackedDouble, i8immZExt5, loadv4f64>, PD, VEX_4V, VEX_L;
// Legacy-SSE packed compares, dst tied to src1.
// NOTE(review): these use i8immZExt5 while the legacy scalar CMPSS/CMPSD
// above use i8immZExt3 — verify this asymmetry is intentional.
2544 let Constraints = "$src1 = $dst" in {
2545 defm CMPPS : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse_cmp_ps,
2546 "cmp${cc}ps\t{$src2, $dst|$dst, $src2}",
2547 "cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2548 SSEPackedSingle, i8immZExt5, memopv4f32, SSE_ALU_F32P>, PS;
2549 defm CMPPD : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse2_cmp_pd,
2550 "cmp${cc}pd\t{$src2, $dst|$dst, $src2}",
2551 "cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2552 SSEPackedDouble, i8immZExt5, memopv2f64, SSE_ALU_F64P>, PD;
// Select the X86cmpp DAG node (integer-typed packed-compare result) onto the
// CMPPS/CMPPD instructions defined above, with load folding.
2555 let Predicates = [HasAVX] in {
2556 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2557 (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2558 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (loadv4f32 addr:$src2), imm:$cc)),
2559 (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
2560 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2561 (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2562 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (loadv2f64 addr:$src2), imm:$cc)),
2563 (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2565 def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
2566 (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
2567 def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), (loadv8f32 addr:$src2), imm:$cc)),
2568 (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
2569 def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
2570 (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
2571 def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), (loadv4f64 addr:$src2), imm:$cc)),
2572 (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
// Legacy-SSE selections; split by predicate because CMPPD needs SSE2.
2575 let Predicates = [UseSSE1] in {
2576 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2577 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2578 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memopv4f32 addr:$src2), imm:$cc)),
2579 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
2582 let Predicates = [UseSSE2] in {
2583 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2584 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2585 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memopv2f64 addr:$src2), imm:$cc)),
2586 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2589 //===----------------------------------------------------------------------===//
2590 // SSE 1 & 2 - Shuffle Instructions
2591 //===----------------------------------------------------------------------===//
2593 /// sse12_shuffle - sse 1 & 2 fp shuffle instructions
// SHUFPS/SHUFPD (opcode 0xC6): rmi (load-folded) and rri forms, both
// matching the X86Shufp node with an 8-bit shuffle-control immediate.
2594 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
2595 ValueType vt, string asm, PatFrag mem_frag,
2597 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
2598 (ins RC:$src1, x86memop:$src2, u8imm:$src3), asm,
2599 [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
2600 (i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
2601 Sched<[WriteFShuffleLd, ReadAfterLd]>;
2602 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
2603 (ins RC:$src1, RC:$src2, u8imm:$src3), asm,
2604 [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
2605 (i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
2606 Sched<[WriteFShuffle]>;
// VEX-encoded shuffles, 128- and 256-bit; NoVLX keeps these out of the way
// of the EVEX forms when AVX512VL is available.
2609 let Predicates = [HasAVX, NoVLX] in {
2610 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2611 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2612 loadv4f32, SSEPackedSingle>, PS, VEX_4V;
2613 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
2614 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2615 loadv8f32, SSEPackedSingle>, PS, VEX_4V, VEX_L;
2616 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2617 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2618 loadv2f64, SSEPackedDouble>, PD, VEX_4V;
2619 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
2620 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2621 loadv4f64, SSEPackedDouble>, PD, VEX_4V, VEX_L;
// Legacy-SSE shuffles, dst tied to src1, aligned-memory operands.
2623 let Constraints = "$src1 = $dst" in {
2624 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2625 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2626 memopv4f32, SSEPackedSingle>, PS;
2627 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2628 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2629 memopv2f64, SSEPackedDouble>, PD;
// Integer-typed X86Shufp nodes are also lowered to the FP shuffle
// instructions (bitcasting loads where element width differs).
2632 let Predicates = [HasAVX, NoVLX] in {
2633 def : Pat<(v4i32 (X86Shufp VR128:$src1,
2634 (bc_v4i32 (loadv2i64 addr:$src2)), (i8 imm:$imm))),
2635 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2636 def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2637 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
2639 def : Pat<(v2i64 (X86Shufp VR128:$src1,
2640 (loadv2i64 addr:$src2), (i8 imm:$imm))),
2641 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
2642 def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2643 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
2646 def : Pat<(v8i32 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2647 (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2648 def : Pat<(v8i32 (X86Shufp VR256:$src1,
2649 (bc_v8i32 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
2650 (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;
2652 def : Pat<(v4i64 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2653 (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2654 def : Pat<(v4i64 (X86Shufp VR256:$src1,
2655 (loadv4i64 addr:$src2), (i8 imm:$imm))),
2656 (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
// Legacy-SSE selections of the same integer shuffles.
2659 let Predicates = [UseSSE1] in {
2660 def : Pat<(v4i32 (X86Shufp VR128:$src1,
2661 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
2662 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2663 def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2664 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
2667 let Predicates = [UseSSE2] in {
2668 // Generic SHUFPD patterns
2669 def : Pat<(v2i64 (X86Shufp VR128:$src1,
2670 (memopv2i64 addr:$src2), (i8 imm:$imm))),
2671 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
2672 def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2673 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
2676 //===----------------------------------------------------------------------===//
2677 // SSE 1 & 2 - Unpack FP Instructions
2678 //===----------------------------------------------------------------------===//
2680 /// sse12_unpack_interleave - sse 1 & 2 fp unpack and interleave
// UNPCK[LH]PS/PD: rr and load-folded rm forms matching the X86Unpckl /
// X86Unpckh nodes (passed in as OpNode).
2681 multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
2682 PatFrag mem_frag, RegisterClass RC,
2683 X86MemOperand x86memop, string asm,
2685 def rr : PI<opc, MRMSrcReg,
2686 (outs RC:$dst), (ins RC:$src1, RC:$src2),
2688 (vt (OpNode RC:$src1, RC:$src2)))],
2689 IIC_SSE_UNPCK, d>, Sched<[WriteFShuffle]>;
2690 def rm : PI<opc, MRMSrcMem,
2691 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2693 (vt (OpNode RC:$src1,
2694 (mem_frag addr:$src2))))],
2696 Sched<[WriteFShuffleLd, ReadAfterLd]>;
// VEX-encoded unpack/interleave: 0x15 = high halves, 0x14 = low halves.
2699 let Predicates = [HasAVX, NoVLX] in {
2700 defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, loadv4f32,
2701 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2702 SSEPackedSingle>, PS, VEX_4V;
2703 defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, loadv2f64,
2704 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2705 SSEPackedDouble>, PD, VEX_4V;
2706 defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, loadv4f32,
2707 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2708 SSEPackedSingle>, PS, VEX_4V;
2709 defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, loadv2f64,
2710 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2711 SSEPackedDouble>, PD, VEX_4V;
2713 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, loadv8f32,
2714 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2715 SSEPackedSingle>, PS, VEX_4V, VEX_L;
2716 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, loadv4f64,
2717 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2718 SSEPackedDouble>, PD, VEX_4V, VEX_L;
2719 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, loadv8f32,
2720 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2721 SSEPackedSingle>, PS, VEX_4V, VEX_L;
2722 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, loadv4f64,
2723 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2724 SSEPackedDouble>, PD, VEX_4V, VEX_L;
2725 }// Predicates = [HasAVX, NoVLX]
// Legacy-SSE unpacks, dst tied to src1.
2726 let Constraints = "$src1 = $dst" in {
2727 defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
2728 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
2729 SSEPackedSingle>, PS;
2730 defm UNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
2731 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
2732 SSEPackedDouble>, PD;
2733 defm UNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
2734 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
2735 SSEPackedSingle>, PS;
2736 defm UNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
2737 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
2738 SSEPackedDouble>, PD;
2739 } // Constraints = "$src1 = $dst"
// On AVX1 (no AVX2) there are no 256-bit integer unpack instructions, so
// 256-bit integer unpacks are lowered to the FP VUNPCK*PS/PD forms.
2741 let Predicates = [HasAVX1Only] in {
2742 def : Pat<(v8i32 (X86Unpckl VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
2743 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
2744 def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)),
2745 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
2746 def : Pat<(v8i32 (X86Unpckh VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
2747 (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
2748 def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)),
2749 (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
2751 def : Pat<(v4i64 (X86Unpckl VR256:$src1, (loadv4i64 addr:$src2))),
2752 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
2753 def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)),
2754 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
2755 def : Pat<(v4i64 (X86Unpckh VR256:$src1, (loadv4i64 addr:$src2))),
2756 (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
2757 def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)),
2758 (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
2761 //===----------------------------------------------------------------------===//
2762 // SSE 1 & 2 - Extract Floating-Point Sign mask
2763 //===----------------------------------------------------------------------===//
2765 /// sse12_extr_sign_mask - sse 1 & 2 unpack and interleave
// NOTE(review): despite the inherited comment above, this actually defines
// MOVMSKPS/PD (opcode 0x50): extract packed FP sign bits into a GPR. It is
// register-source only; no memory form is defined here.
2766 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
2768 def rr : PI<0x50, MRMSrcReg, (outs GR32orGR64:$dst), (ins RC:$src),
2769 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
2770 [(set GR32orGR64:$dst, (Int RC:$src))], IIC_SSE_MOVMSK, d>,
2771 Sched<[WriteVecLogic]>;
// MOVMSK instantiations plus X86fgetsign lowering: a scalar FP sign query is
// implemented by copying into an XMM register and running MOVMSKPS/PD; the
// i64 variants zero-extend the 32-bit result via SUBREG_TO_REG.
2774 let Predicates = [HasAVX] in {
2775 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
2776 "movmskps", SSEPackedSingle>, PS, VEX;
2777 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
2778 "movmskpd", SSEPackedDouble>, PD, VEX;
2779 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
2780 "movmskps", SSEPackedSingle>, PS,
2782 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
2783 "movmskpd", SSEPackedDouble>, PD,
2786 def : Pat<(i32 (X86fgetsign FR32:$src)),
2787 (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
2788 def : Pat<(i64 (X86fgetsign FR32:$src)),
2789 (SUBREG_TO_REG (i64 0),
2790 (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>;
2791 def : Pat<(i32 (X86fgetsign FR64:$src)),
2792 (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
2793 def : Pat<(i64 (X86fgetsign FR64:$src)),
2794 (SUBREG_TO_REG (i64 0),
2795 (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>;
// Legacy-SSE MOVMSK and the matching fgetsign selections, gated per
// feature level (PS needs SSE1, PD needs SSE2).
2798 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
2799 SSEPackedSingle>, PS;
2800 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
2801 SSEPackedDouble>, PD;
2803 def : Pat<(i32 (X86fgetsign FR32:$src)),
2804 (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>,
2805 Requires<[UseSSE1]>;
2806 def : Pat<(i64 (X86fgetsign FR32:$src)),
2807 (SUBREG_TO_REG (i64 0),
2808 (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>,
2809 Requires<[UseSSE1]>;
2810 def : Pat<(i32 (X86fgetsign FR64:$src)),
2811 (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>,
2812 Requires<[UseSSE2]>;
2813 def : Pat<(i64 (X86fgetsign FR64:$src)),
2814 (SUBREG_TO_REG (i64 0),
2815 (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>,
2816 Requires<[UseSSE2]>;
2818 //===---------------------------------------------------------------------===//
2819 // SSE2 - Packed Integer Logical Instructions
2820 //===---------------------------------------------------------------------===//
// All definitions below run in the packed-integer execution domain.
2822 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2824 /// PDI_binop_rm - Simple SSE2 binary operator.
// rr and load-folded rm forms; Is2Addr selects the 2-operand (tied dst)
// legacy asm string vs. the 3-operand VEX string. The memory operand is
// bitconverted so one i64-vector load fragment covers all element widths.
2825 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2826 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
2827 X86MemOperand x86memop, OpndItins itins,
2828 bit IsCommutable, bit Is2Addr> {
2829 let isCommutable = IsCommutable in
2830 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
2831 (ins RC:$src1, RC:$src2),
2833 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2834 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2835 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
2836 Sched<[itins.Sched]>;
2837 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
2838 (ins RC:$src1, x86memop:$src2),
2840 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2841 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2842 [(set RC:$dst, (OpVT (OpNode RC:$src1,
2843 (bitconvert (memop_frag addr:$src2)))))],
2845 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2847 } // ExeDomain = SSEPackedInt
// PDI_binop_all - instantiate a packed-integer binop in all three encodings:
// VEX 128-bit (V<name>), legacy SSE 128-bit (<name>, tied operands), and
// VEX 256-bit (V<name>Y, gated on AVX2).
2849 multiclass PDI_binop_all<bits<8> opc, string OpcodeStr, SDNode Opcode,
2850 ValueType OpVT128, ValueType OpVT256,
2851 OpndItins itins, bit IsCommutable = 0, Predicate prd> {
2852 let Predicates = [HasAVX, prd] in
2853 defm V#NAME : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode, OpVT128,
2854 VR128, loadv2i64, i128mem, itins, IsCommutable, 0>, VEX_4V;
2856 let Constraints = "$src1 = $dst" in
2857 defm NAME : PDI_binop_rm<opc, OpcodeStr, Opcode, OpVT128, VR128,
2858 memopv2i64, i128mem, itins, IsCommutable, 1>;
2860 let Predicates = [HasAVX2, prd] in
2861 defm V#NAME#Y : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode,
2862 OpVT256, VR256, loadv4i64, i256mem, itins,
2863 IsCommutable, 0>, VEX_4V, VEX_L;
2866 // These are ordered here for pattern ordering requirements with the fp versions
// Bitwise logic on v2i64/v4i64; PANDN is not commutable because the first
// operand is complemented (X86andnp).
2868 defm PAND : PDI_binop_all<0xDB, "pand", and, v2i64, v4i64,
2869 SSE_VEC_BIT_ITINS_P, 1, NoVLX>;
2870 defm POR : PDI_binop_all<0xEB, "por", or, v2i64, v4i64,
2871 SSE_VEC_BIT_ITINS_P, 1, NoVLX>;
2872 defm PXOR : PDI_binop_all<0xEF, "pxor", xor, v2i64, v4i64,
2873 SSE_VEC_BIT_ITINS_P, 1, NoVLX>;
2874 defm PANDN : PDI_binop_all<0xDF, "pandn", X86andnp, v2i64, v4i64,
2875 SSE_VEC_BIT_ITINS_P, 0, NoVLX>;
2877 //===----------------------------------------------------------------------===//
2878 // SSE 1 & 2 - Logical Instructions
2879 //===----------------------------------------------------------------------===//
2881 // Multiclass for scalars using the X86 logical operation aliases for FP.
// Scalar FP logical ops (FsAND etc.) reuse the 128-bit packed encodings on
// FR32/FR64 via *_128 load fragments; codegen-only (see instantiations).
2882 multiclass sse12_fp_packed_scalar_logical_alias<
2883 bits<8> opc, string OpcodeStr, SDNode OpNode, OpndItins itins> {
2884 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2885 FR32, f32, f128mem, loadf32_128, SSEPackedSingle, itins, 0>,
2888 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2889 FR64, f64, f128mem, loadf64_128, SSEPackedDouble, itins, 0>,
2892 let Constraints = "$src1 = $dst" in {
2893 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
2894 f32, f128mem, memopfsf32_128, SSEPackedSingle, itins>, PS;
2896 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
2897 f64, f128mem, memopfsf64_128, SSEPackedDouble, itins>, PD;
// FsAND/FsOR/FsXOR/FsANDN: X86f* scalar logical nodes; ANDN is not
// commutable (first operand complemented).
2901 let isCodeGenOnly = 1 in {
2902 defm FsAND : sse12_fp_packed_scalar_logical_alias<0x54, "and", X86fand,
2904 defm FsOR : sse12_fp_packed_scalar_logical_alias<0x56, "or", X86for,
2906 defm FsXOR : sse12_fp_packed_scalar_logical_alias<0x57, "xor", X86fxor,
2909 let isCommutable = 0 in
2910 defm FsANDN : sse12_fp_packed_scalar_logical_alias<0x55, "andn", X86fandn,
2914 // Multiclass for vectors using the X86 logical operation aliases for FP.
// Vector FP logical ops matched via the X86f* nodes on f32/f64 vectors;
// instantiated codegen-only below as FvAND/FvOR/FvXOR/FvANDN.
2915 multiclass sse12_fp_packed_vector_logical_alias<
2916 bits<8> opc, string OpcodeStr, SDNode OpNode, OpndItins itins> {
2917 let Predicates = [HasAVX, NoVLX_Or_NoDQI] in {
2918 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2919 VR128, v4f32, f128mem, loadv4f32, SSEPackedSingle, itins, 0>,
2922 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2923 VR128, v2f64, f128mem, loadv2f64, SSEPackedDouble, itins, 0>,
2926 defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2927 VR256, v8f32, f256mem, loadv8f32, SSEPackedSingle, itins, 0>,
2930 defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2931 VR256, v4f64, f256mem, loadv4f64, SSEPackedDouble, itins, 0>,
2935 let Constraints = "$src1 = $dst" in {
2936 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
2937 v4f32, f128mem, memopv4f32, SSEPackedSingle, itins>,
2940 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
2941 v2f64, f128mem, memopv2f64, SSEPackedDouble, itins>,
2946 let isCodeGenOnly = 1 in {
2947 defm FvAND : sse12_fp_packed_vector_logical_alias<0x54, "and", X86fand,
2949 defm FvOR : sse12_fp_packed_vector_logical_alias<0x56, "or", X86for,
2951 defm FvXOR : sse12_fp_packed_vector_logical_alias<0x57, "xor", X86fxor,
2954 let isCommutable = 0 in
2955 defm FvANDN : sse12_fp_packed_vector_logical_alias<0x55, "andn", X86fandn,
2959 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
// Public ANDPS/ORPS/XORPS/ANDNPS (+PD, +VEX, +256-bit) definitions. The
// patterns operate on integer vector types (v2i64/v4i64) with bitcasts of
// the FP inputs, since the logic op itself is bit-pattern agnostic.
2961 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
2963 let Predicates = [HasAVX, NoVLX] in {
2964 defm V#NAME#PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
2965 !strconcat(OpcodeStr, "ps"), f256mem,
2966 [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
2967 [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
2968 (loadv4i64 addr:$src2)))], 0>, PS, VEX_4V, VEX_L;
2970 defm V#NAME#PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
2971 !strconcat(OpcodeStr, "pd"), f256mem,
2972 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2973 (bc_v4i64 (v4f64 VR256:$src2))))],
2974 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2975 (loadv4i64 addr:$src2)))], 0>,
2978 // In AVX no need to add a pattern for 128-bit logical rr ps, because they
2979 // are all promoted to v2i64, and the patterns are covered by the int
2980 // version. This is needed in SSE only, because v2i64 isn't supported on
2981 // SSE1, but only on SSE2.
2982 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2983 !strconcat(OpcodeStr, "ps"), f128mem, [],
2984 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2985 (loadv2i64 addr:$src2)))], 0>, PS, VEX_4V;
2987 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2988 !strconcat(OpcodeStr, "pd"), f128mem,
2989 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2990 (bc_v2i64 (v2f64 VR128:$src2))))],
2991 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2992 (loadv2i64 addr:$src2)))], 0>,
// Legacy-SSE versions, tied operands, aligned memory.
2996 let Constraints = "$src1 = $dst" in {
2997 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2998 !strconcat(OpcodeStr, "ps"), f128mem,
2999 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
3000 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
3001 (memopv2i64 addr:$src2)))]>, PS;
3003 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
3004 !strconcat(OpcodeStr, "pd"), f128mem,
3005 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
3006 (bc_v2i64 (v2f64 VR128:$src2))))],
3007 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
3008 (memopv2i64 addr:$src2)))]>, PD;
3012 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
3013 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
3014 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
3015 let isCommutable = 0 in
3016 defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
3018 // AVX1 requires type coercions in order to fold loads directly into logical
// AVX1 has no 256-bit integer logic ops, so fold integer loads into the
// FP-domain VANDPSY/VORPSY/VXORPSY/VANDNPSY via bitcast patterns.
3020 let Predicates = [HasAVX1Only] in {
3021 def : Pat<(bc_v8f32 (and VR256:$src1, (loadv4i64 addr:$src2))),
3022 (VANDPSYrm VR256:$src1, addr:$src2)>;
3023 def : Pat<(bc_v8f32 (or VR256:$src1, (loadv4i64 addr:$src2))),
3024 (VORPSYrm VR256:$src1, addr:$src2)>;
3025 def : Pat<(bc_v8f32 (xor VR256:$src1, (loadv4i64 addr:$src2))),
3026 (VXORPSYrm VR256:$src1, addr:$src2)>;
3027 def : Pat<(bc_v8f32 (X86andnp VR256:$src1, (loadv4i64 addr:$src2))),
3028 (VANDNPSYrm VR256:$src1, addr:$src2)>;
3031 //===----------------------------------------------------------------------===//
3032 // SSE 1 & 2 - Arithmetic Instructions
3033 //===----------------------------------------------------------------------===//
3035 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
3038 /// In addition, we also have a special variant of the scalar form here to
3039 /// represent the associated intrinsic operation. This form is unlike the
3040 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
3041 /// and leaves the top elements unmodified (therefore these cannot be commuted).
3043 /// These three forms can each be reg+reg or reg+mem.
3046 /// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
// Packed form: instantiates <op>PS/PD in VEX 128/256-bit encodings plus the
// legacy tied-operand encodings, using per-width itineraries from SizeItins.
3048 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr,
3049 SDNode OpNode, SizeItins itins> {
3050 let Predicates = [HasAVX, NoVLX] in {
3051 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
3052 VR128, v4f32, f128mem, loadv4f32,
3053 SSEPackedSingle, itins.s, 0>, PS, VEX_4V;
3054 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
3055 VR128, v2f64, f128mem, loadv2f64,
3056 SSEPackedDouble, itins.d, 0>, PD, VEX_4V;
3058 defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"),
3059 OpNode, VR256, v8f32, f256mem, loadv8f32,
3060 SSEPackedSingle, itins.s, 0>, PS, VEX_4V, VEX_L;
3061 defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"),
3062 OpNode, VR256, v4f64, f256mem, loadv4f64,
3063 SSEPackedDouble, itins.d, 0>, PD, VEX_4V, VEX_L;
3066 let Constraints = "$src1 = $dst" in {
3067 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
3068 v4f32, f128mem, memopv4f32, SSEPackedSingle,
3070 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
3071 v2f64, f128mem, memopv2f64, SSEPackedDouble,
// Scalar form: instantiates <op>SS/SD on FR32/FR64, VEX (3-operand) plus
// legacy tied-operand encodings.
3076 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
3078 defm V#NAME#SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
3079 OpNode, FR32, f32mem, SSEPackedSingle, itins.s, 0>,
3080 XS, VEX_4V, VEX_LIG;
3081 defm V#NAME#SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
3082 OpNode, FR64, f64mem, SSEPackedDouble, itins.d, 0>,
3083 XD, VEX_4V, VEX_LIG;
3085 let Constraints = "$src1 = $dst" in {
3086 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
3087 OpNode, FR32, f32mem, SSEPackedSingle,
3089 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
3090 OpNode, FR64, f64mem, SSEPackedDouble,
// Intrinsic scalar form: whole-XMM variants matching the _ss/_sd intrinsics
// (the "", "2" strings select the sse vs. sse2 intrinsic namespace).
3095 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
3097 defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3098 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
3099 SSEPackedSingle, itins.s, 0>, XS, VEX_4V, VEX_LIG;
3100 defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3101 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
3102 SSEPackedDouble, itins.d, 0>, XD, VEX_4V, VEX_LIG;
3104 let Constraints = "$src1 = $dst" in {
3105 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3106 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
3107 SSEPackedSingle, itins.s>, XS;
3108 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3109 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
3110 SSEPackedDouble, itins.d>, XD;
3114 // Binary Arithmetic instructions
// Each arithmetic op instantiates all three families at once: packed,
// scalar, and scalar-intrinsic (0x58 = add, 0x59 = mul).
3115 defm ADD : basic_sse12_fp_binop_p<0x58, "add", fadd, SSE_ALU_ITINS_P>,
3116 basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>,
3117 basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S>;
3118 defm MUL : basic_sse12_fp_binop_p<0x59, "mul", fmul, SSE_MUL_ITINS_P>,
3119 basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S>,
3120 basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S>;
3121 let isCommutable = 0 in {
3122 defm SUB : basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P>,