1 //====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
17 //===----------------------------------------------------------------------===//
18 // SSE scalar FP Instructions
19 //===----------------------------------------------------------------------===//
21 // CMOV* - Used to implement the SSE SELECT DAG operation. Expanded after
22 // instruction selection into a branch sequence.
23 let Uses = [EFLAGS], usesCustomInserter = 1 in {
24 def CMOV_FR32 : I<0, Pseudo,
25 (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
27 [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
29 def CMOV_FR64 : I<0, Pseudo,
30 (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
32 [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
34 def CMOV_V4F32 : I<0, Pseudo,
35 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
36 "#CMOV_V4F32 PSEUDO!",
38 (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
40 def CMOV_V2F64 : I<0, Pseudo,
41 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
42 "#CMOV_V2F64 PSEUDO!",
44 (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
46 def CMOV_V2I64 : I<0, Pseudo,
47 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
48 "#CMOV_V2I64 PSEUDO!",
50 (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
54 //===----------------------------------------------------------------------===//
55 // SSE 1 & 2 Instructions Classes
56 //===----------------------------------------------------------------------===//
58 /// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
59 multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
60 RegisterClass RC, X86MemOperand x86memop,
62 let isCommutable = 1 in {
63 def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
65 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
66 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
67 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
69 def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
71 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
72 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
73 [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
76 /// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
77 multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
78 string asm, string SSEVer, string FPSizeStr,
79 Operand memopr, ComplexPattern mem_cpat,
81 def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
83 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
84 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
85 [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
86 !strconcat(SSEVer, !strconcat("_",
87 !strconcat(OpcodeStr, FPSizeStr))))
88 RC:$src1, RC:$src2))]>;
89 def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
91 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
92 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
93 [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
94 !strconcat(SSEVer, !strconcat("_",
95 !strconcat(OpcodeStr, FPSizeStr))))
96 RC:$src1, mem_cpat:$src2))]>;
99 /// sse12_fp_packed - SSE 1 & 2 packed instructions class
100 multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
101 RegisterClass RC, ValueType vt,
102 X86MemOperand x86memop, PatFrag mem_frag,
103 Domain d, bit Is2Addr = 1> {
104 let isCommutable = 1 in
105 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
107 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
108 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
109 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
111 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
113 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
114 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
115 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
118 /// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
119 multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
120 string OpcodeStr, X86MemOperand x86memop,
121 list<dag> pat_rr, list<dag> pat_rm,
123 let isCommutable = 1 in
124 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
126 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
127 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
129 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
131 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
132 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
136 /// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
137 multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
138 string asm, string SSEVer, string FPSizeStr,
139 X86MemOperand x86memop, PatFrag mem_frag,
140 Domain d, bit Is2Addr = 1> {
141 def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
143 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
144 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
145 [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_",
146 !strconcat(SSEVer, !strconcat("_",
147 !strconcat(OpcodeStr, FPSizeStr))))
148 RC:$src1, RC:$src2))], d>;
149 def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
151 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
152 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
153 [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_",
154 !strconcat(SSEVer, !strconcat("_",
155 !strconcat(OpcodeStr, FPSizeStr))))
156 RC:$src1, (mem_frag addr:$src2)))], d>;
159 //===----------------------------------------------------------------------===//
160 // SSE 1 & 2 - Move Instructions
161 //===----------------------------------------------------------------------===//
163 class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
164 SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
165 [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;
167 // Loading from memory automatically zeroing upper bits.
168 class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
169 PatFrag mem_pat, string OpcodeStr> :
170 SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
171 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
172 [(set RC:$dst, (mem_pat addr:$src))]>;
174 // Move Instructions. Register-to-register movss/movsd is not used for FR32/64
175 // register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
176 // is used instead. Register-to-register movss/movsd is not modeled as an
177 // INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
178 // in terms of a copy, and just mentioned, we don't use movss/movsd for copies.
179 let isAsmParserOnly = 1 in {
180 def VMOVSSrr : sse12_move_rr<FR32, v4f32,
181 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
182 def VMOVSDrr : sse12_move_rr<FR64, v2f64,
183 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;
185 let canFoldAsLoad = 1, isReMaterializable = 1 in {
186 def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;
188 let AddedComplexity = 20 in
189 def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
193 let Constraints = "$src1 = $dst" in {
194 def MOVSSrr : sse12_move_rr<FR32, v4f32,
195 "movss\t{$src2, $dst|$dst, $src2}">, XS;
196 def MOVSDrr : sse12_move_rr<FR64, v2f64,
197 "movsd\t{$src2, $dst|$dst, $src2}">, XD;
200 let canFoldAsLoad = 1, isReMaterializable = 1 in {
201 def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;
203 let AddedComplexity = 20 in
204 def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
207 let AddedComplexity = 15 in {
208 // Extract the low 32-bit value from one vector and insert it into another.
209 def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
210 (MOVSSrr (v4f32 VR128:$src1),
211 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
212 // Extract the low 64-bit value from one vector and insert it into another.
213 def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
214 (MOVSDrr (v2f64 VR128:$src1),
215 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
218 // Implicitly promote a 32-bit scalar to a vector.
219 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
220 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
221 // Implicitly promote a 64-bit scalar to a vector.
222 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
223 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
225 let AddedComplexity = 20 in {
226 // MOVSSrm zeros the high parts of the register; represent this
227 // with SUBREG_TO_REG.
228 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
229 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
230 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
231 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
232 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
233 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
234 // MOVSDrm zeros the high parts of the register; represent this
235 // with SUBREG_TO_REG.
236 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
237 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
238 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
239 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
240 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
241 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
242 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
243 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
244 def : Pat<(v2f64 (X86vzload addr:$src)),
245 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
248 // Store scalar value to memory.
249 def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
250 "movss\t{$src, $dst|$dst, $src}",
251 [(store FR32:$src, addr:$dst)]>;
252 def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
253 "movsd\t{$src, $dst|$dst, $src}",
254 [(store FR64:$src, addr:$dst)]>;
256 let isAsmParserOnly = 1 in {
257 def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
258 "movss\t{$src, $dst|$dst, $src}",
259 [(store FR32:$src, addr:$dst)]>, XS, VEX;
260 def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
261 "movsd\t{$src, $dst|$dst, $src}",
262 [(store FR64:$src, addr:$dst)]>, XD, VEX;
265 // Extract and store.
266 def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
269 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
270 def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
273 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
275 // Move Aligned/Unaligned floating point values
276 multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
277 X86MemOperand x86memop, PatFrag ld_frag,
278 string asm, Domain d,
279 bit IsReMaterializable = 1> {
280 let neverHasSideEffects = 1 in
281 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
282 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
283 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
284 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
285 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
286 [(set RC:$dst, (ld_frag addr:$src))], d>;
289 let isAsmParserOnly = 1 in {
290 defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
291 "movaps", SSEPackedSingle>, VEX;
292 defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
293 "movapd", SSEPackedDouble>, OpSize, VEX;
294 defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
295 "movups", SSEPackedSingle>, VEX;
296 defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
297 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
299 defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
300 "movaps", SSEPackedSingle>, VEX;
301 defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
302 "movapd", SSEPackedDouble>, OpSize, VEX;
303 defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
304 "movups", SSEPackedSingle>, VEX;
305 defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
306 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
308 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
309 "movaps", SSEPackedSingle>, TB;
310 defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
311 "movapd", SSEPackedDouble>, TB, OpSize;
312 defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
313 "movups", SSEPackedSingle>, TB;
314 defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
315 "movupd", SSEPackedDouble, 0>, TB, OpSize;
317 let isAsmParserOnly = 1 in {
318 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
319 "movaps\t{$src, $dst|$dst, $src}",
320 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
321 def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
322 "movapd\t{$src, $dst|$dst, $src}",
323 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
324 def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
325 "movups\t{$src, $dst|$dst, $src}",
326 [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
327 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
328 "movupd\t{$src, $dst|$dst, $src}",
329 [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
330 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
331 "movaps\t{$src, $dst|$dst, $src}",
332 [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
333 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
334 "movapd\t{$src, $dst|$dst, $src}",
335 [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
336 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
337 "movups\t{$src, $dst|$dst, $src}",
338 [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
339 def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
340 "movupd\t{$src, $dst|$dst, $src}",
341 [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
344 def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
345 def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
346 (VMOVUPSYmr addr:$dst, VR256:$src)>;
348 def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
349 def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
350 (VMOVUPDYmr addr:$dst, VR256:$src)>;
352 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
353 "movaps\t{$src, $dst|$dst, $src}",
354 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
355 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
356 "movapd\t{$src, $dst|$dst, $src}",
357 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
358 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
359 "movups\t{$src, $dst|$dst, $src}",
360 [(store (v4f32 VR128:$src), addr:$dst)]>;
361 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
362 "movupd\t{$src, $dst|$dst, $src}",
363 [(store (v2f64 VR128:$src), addr:$dst)]>;
365 // Intrinsic forms of MOVUPS/D load and store
366 let isAsmParserOnly = 1 in {
367 let canFoldAsLoad = 1, isReMaterializable = 1 in
368 def VMOVUPSrm_Int : VPSI<0x10, MRMSrcMem, (outs VR128:$dst),
370 "movups\t{$src, $dst|$dst, $src}",
371 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>, VEX;
372 def VMOVUPDrm_Int : VPDI<0x10, MRMSrcMem, (outs VR128:$dst),
374 "movupd\t{$src, $dst|$dst, $src}",
375 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>, VEX;
376 def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
377 (ins f128mem:$dst, VR128:$src),
378 "movups\t{$src, $dst|$dst, $src}",
379 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
380 def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
381 (ins f128mem:$dst, VR128:$src),
382 "movupd\t{$src, $dst|$dst, $src}",
383 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
385 let canFoldAsLoad = 1, isReMaterializable = 1 in
386 def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
387 "movups\t{$src, $dst|$dst, $src}",
388 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
389 def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
390 "movupd\t{$src, $dst|$dst, $src}",
391 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
393 def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
394 "movups\t{$src, $dst|$dst, $src}",
395 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
396 def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
397 "movupd\t{$src, $dst|$dst, $src}",
398 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
400 // Move Low/High packed floating point values
401 multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
402 PatFrag mov_frag, string base_opc,
404 def PSrm : PI<opc, MRMSrcMem,
405 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
406 !strconcat(!strconcat(base_opc,"s"), asm_opr),
409 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
410 SSEPackedSingle>, TB;
412 def PDrm : PI<opc, MRMSrcMem,
413 (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
414 !strconcat(!strconcat(base_opc,"d"), asm_opr),
415 [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
416 (scalar_to_vector (loadf64 addr:$src2)))))],
417 SSEPackedDouble>, TB, OpSize;
420 let isAsmParserOnly = 1, AddedComplexity = 20 in {
421 defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
422 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
423 defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
424 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
426 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
427 defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
428 "\t{$src2, $dst|$dst, $src2}">;
429 defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
430 "\t{$src2, $dst|$dst, $src2}">;
433 let isAsmParserOnly = 1 in {
434 def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
435 "movlps\t{$src, $dst|$dst, $src}",
436 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
437 (iPTR 0))), addr:$dst)]>, VEX;
438 def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
439 "movlpd\t{$src, $dst|$dst, $src}",
440 [(store (f64 (vector_extract (v2f64 VR128:$src),
441 (iPTR 0))), addr:$dst)]>, VEX;
443 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
444 "movlps\t{$src, $dst|$dst, $src}",
445 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
446 (iPTR 0))), addr:$dst)]>;
447 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
448 "movlpd\t{$src, $dst|$dst, $src}",
449 [(store (f64 (vector_extract (v2f64 VR128:$src),
450 (iPTR 0))), addr:$dst)]>;
452 // v2f64 extract element 1 is always custom lowered to unpack high to low
453 // and extract element 0 so the non-store version isn't too horrible.
454 let isAsmParserOnly = 1 in {
455 def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
456 "movhps\t{$src, $dst|$dst, $src}",
457 [(store (f64 (vector_extract
458 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
459 (undef)), (iPTR 0))), addr:$dst)]>,
461 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
462 "movhpd\t{$src, $dst|$dst, $src}",
463 [(store (f64 (vector_extract
464 (v2f64 (unpckh VR128:$src, (undef))),
465 (iPTR 0))), addr:$dst)]>,
468 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
469 "movhps\t{$src, $dst|$dst, $src}",
470 [(store (f64 (vector_extract
471 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
472 (undef)), (iPTR 0))), addr:$dst)]>;
473 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
474 "movhpd\t{$src, $dst|$dst, $src}",
475 [(store (f64 (vector_extract
476 (v2f64 (unpckh VR128:$src, (undef))),
477 (iPTR 0))), addr:$dst)]>;
479 let isAsmParserOnly = 1, AddedComplexity = 20 in {
480 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
481 (ins VR128:$src1, VR128:$src2),
482 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
484 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
486 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
487 (ins VR128:$src1, VR128:$src2),
488 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
490 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
493 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
494 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
495 (ins VR128:$src1, VR128:$src2),
496 "movlhps\t{$src2, $dst|$dst, $src2}",
498 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
499 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
500 (ins VR128:$src1, VR128:$src2),
501 "movhlps\t{$src2, $dst|$dst, $src2}",
503 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
506 def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
507 (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
508 let AddedComplexity = 20 in {
509 def : Pat<(v4f32 (movddup VR128:$src, (undef))),
510 (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
511 def : Pat<(v2i64 (movddup VR128:$src, (undef))),
512 (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
515 //===----------------------------------------------------------------------===//
516 // SSE 1 & 2 - Conversion Instructions
517 //===----------------------------------------------------------------------===//
519 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
520 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
522 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
523 [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
524 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
525 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
528 multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
529 X86MemOperand x86memop, string asm> {
530 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
532 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
536 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
537 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
538 string asm, Domain d> {
539 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
540 [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
541 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
542 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
545 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
546 X86MemOperand x86memop, string asm> {
547 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
548 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
549 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
550 (ins DstRC:$src1, x86memop:$src),
551 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
554 let isAsmParserOnly = 1 in {
555 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
556 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
557 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
558 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
560 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
561 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
562 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
563 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
566 // The assembler can recognize rr 64-bit instructions by seeing a rxx
567 // register, but the same isn't true when only using memory operands,
568 // provide other assembly "l" and "q" forms to address this explicitly
569 // where appropriate to do so.
570 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
572 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
574 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
576 defm VCVTSI2SDL : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
578 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
582 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
583 "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
584 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
585 "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
586 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
587 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
588 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
589 "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
590 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
591 "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
592 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
593 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
594 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
595 "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
596 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
597 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
599 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
600 // and/or XMM operand(s).
602 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
603 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
605 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
606 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
607 [(set DstRC:$dst, (Int SrcRC:$src))]>;
608 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
609 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
610 [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
613 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
614 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
615 PatFrag ld_frag, string asm, bit Is2Addr = 1> {
616 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
618 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
619 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
620 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
621 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
622 (ins DstRC:$src1, x86memop:$src2),
624 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
625 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
626 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
629 let isAsmParserOnly = 1 in {
630 defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
631 f32mem, load, "cvtss2si">, XS, VEX;
632 defm Int_VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
633 int_x86_sse_cvtss2si64, f32mem, load, "cvtss2si">,
635 defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
636 f128mem, load, "cvtsd2si">, XD, VEX;
637 defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
638 int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
641 // FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_
642 // Get rid of this hack or rename the intrinsics, there are several
643 // intructions that only match with the intrinsic form, why create duplicates
644 // to let them be recognized by the assembler?
645 defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
646 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
647 defm VCVTSD2SI64 : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
648 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
650 defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
651 f32mem, load, "cvtss2si">, XS;
652 defm Int_CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
653 f32mem, load, "cvtss2si{q}">, XS, REX_W;
654 defm Int_CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
655 f128mem, load, "cvtsd2si">, XD;
656 defm Int_CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
657 f128mem, load, "cvtsd2si">, XD, REX_W;
659 defm CVTSD2SI64 : sse12_cvt_s_np<0x2D, VR128, GR64, f64mem, "cvtsd2si{q}">, XD,
662 let isAsmParserOnly = 1 in {
663 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
664 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
665 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
666 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
668 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
669 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
670 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
671 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
675 let Constraints = "$src1 = $dst" in {
676 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
677 int_x86_sse_cvtsi2ss, i32mem, loadi32,
679 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
680 int_x86_sse_cvtsi642ss, i64mem, loadi64,
681 "cvtsi2ss{q}">, XS, REX_W;
682 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
683 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
685 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
686 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
687 "cvtsi2sd">, XD, REX_W;
692 // Aliases for intrinsics
693 let isAsmParserOnly = 1 in {
694 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
695 f32mem, load, "cvttss2si">, XS, VEX;
696 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
697 int_x86_sse_cvttss2si64, f32mem, load,
698 "cvttss2si">, XS, VEX, VEX_W;
699 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
700 f128mem, load, "cvttss2si">, XD, VEX;
701 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
702 int_x86_sse2_cvttsd2si64, f128mem, load,
703 "cvttss2si">, XD, VEX, VEX_W;
705 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
706 f32mem, load, "cvttss2si">, XS;
707 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
708 int_x86_sse_cvttss2si64, f32mem, load,
709 "cvttss2si{q}">, XS, REX_W;
710 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
711 f128mem, load, "cvttss2si">, XD;
712 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
713 int_x86_sse2_cvttsd2si64, f128mem, load,
714 "cvttss2si{q}">, XD, REX_W;
716 let isAsmParserOnly = 1, Pattern = []<dag> in {
717 defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
718 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
719 defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
720 "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
722 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
723 "cvtdq2ps\t{$src, $dst|$dst, $src}",
724 SSEPackedSingle>, TB, VEX;
725 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
726 "cvtdq2ps\t{$src, $dst|$dst, $src}",
727 SSEPackedSingle>, TB, VEX;
729 let Pattern = []<dag> in {
730 defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
731 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
732 defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
733 "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
734 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
735 "cvtdq2ps\t{$src, $dst|$dst, $src}",
736 SSEPackedSingle>, TB; /* PD SSE3 form is avaiable */
741 // Convert scalar double to scalar single
742 let isAsmParserOnly = 1 in {
743 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
744 (ins FR64:$src1, FR64:$src2),
745 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
747 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
748 (ins FR64:$src1, f64mem:$src2),
749 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
750 []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
752 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
753 "cvtsd2ss\t{$src, $dst|$dst, $src}",
754 [(set FR32:$dst, (fround FR64:$src))]>;
755 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
756 "cvtsd2ss\t{$src, $dst|$dst, $src}",
757 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
758 Requires<[HasSSE2, OptForSize]>;
760 let isAsmParserOnly = 1 in
761 defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
762 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
764 let Constraints = "$src1 = $dst" in
765 defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
766 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;
768 // Convert scalar single to scalar double
769 let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
770 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
771 (ins FR32:$src1, FR32:$src2),
772 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
773 []>, XS, Requires<[HasAVX]>, VEX_4V;
774 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
775 (ins FR32:$src1, f32mem:$src2),
776 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
777 []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;
779 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
780 "cvtss2sd\t{$src, $dst|$dst, $src}",
781 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
783 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
784 "cvtss2sd\t{$src, $dst|$dst, $src}",
785 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
786 Requires<[HasSSE2, OptForSize]>;
788 let isAsmParserOnly = 1 in {
789 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
790 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
791 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
792 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
793 VR128:$src2))]>, XS, VEX_4V,
795 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
796 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
797 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
798 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
799 (load addr:$src2)))]>, XS, VEX_4V,
802 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
803 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
804 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
805 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
806 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
809 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
810 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
811 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
812 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
813 (load addr:$src2)))]>, XS,
817 def : Pat<(extloadf32 addr:$src),
818 (CVTSS2SDrr (MOVSSrm addr:$src))>,
819 Requires<[HasSSE2, OptForSpeed]>;
821 // Convert doubleword to packed single/double fp
822 let isAsmParserOnly = 1 in { // SSE2 instructions without OpSize prefix
823 def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
824 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
825 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
826 TB, VEX, Requires<[HasAVX]>;
827 def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
828 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
829 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
830 (bitconvert (memopv2i64 addr:$src))))]>,
831 TB, VEX, Requires<[HasAVX]>;
833 def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
834 "cvtdq2ps\t{$src, $dst|$dst, $src}",
835 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
836 TB, Requires<[HasSSE2]>;
837 def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
838 "cvtdq2ps\t{$src, $dst|$dst, $src}",
839 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
840 (bitconvert (memopv2i64 addr:$src))))]>,
841 TB, Requires<[HasSSE2]>;
843 // FIXME: why the non-intrinsic version is described as SSE3?
844 let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
845 def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
846 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
847 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
848 XS, VEX, Requires<[HasAVX]>;
849 def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
850 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
851 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
852 (bitconvert (memopv2i64 addr:$src))))]>,
853 XS, VEX, Requires<[HasAVX]>;
855 def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
856 "cvtdq2pd\t{$src, $dst|$dst, $src}",
857 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
858 XS, Requires<[HasSSE2]>;
859 def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
860 "cvtdq2pd\t{$src, $dst|$dst, $src}",
861 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
862 (bitconvert (memopv2i64 addr:$src))))]>,
863 XS, Requires<[HasSSE2]>;
866 // Convert packed single/double fp to doubleword
867 let isAsmParserOnly = 1 in {
868 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
869 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
870 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
871 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
872 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
873 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
874 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
875 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
877 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
878 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
879 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
880 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
882 let isAsmParserOnly = 1 in {
883 def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
884 "cvtps2dq\t{$src, $dst|$dst, $src}",
885 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
887 def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
889 "cvtps2dq\t{$src, $dst|$dst, $src}",
890 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
891 (memop addr:$src)))]>, VEX;
893 def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
894 "cvtps2dq\t{$src, $dst|$dst, $src}",
895 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
896 def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
897 "cvtps2dq\t{$src, $dst|$dst, $src}",
898 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
899 (memop addr:$src)))]>;
901 let isAsmParserOnly = 1 in { // SSE2 packed instructions with XD prefix
902 def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
903 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
904 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
905 XD, VEX, Requires<[HasAVX]>;
906 def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
907 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
908 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
909 (memop addr:$src)))]>,
910 XD, VEX, Requires<[HasAVX]>;
912 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
913 "cvtpd2dq\t{$src, $dst|$dst, $src}",
914 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
915 XD, Requires<[HasSSE2]>;
916 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
917 "cvtpd2dq\t{$src, $dst|$dst, $src}",
918 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
919 (memop addr:$src)))]>,
920 XD, Requires<[HasSSE2]>;
923 // Convert with truncation packed single/double fp to doubleword
924 let isAsmParserOnly = 1 in { // SSE2 packed instructions with XS prefix
925 def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
926 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
927 def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
928 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
929 def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
930 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
931 def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
932 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
934 def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
935 "cvttps2dq\t{$src, $dst|$dst, $src}", []>;
936 def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
937 "cvttps2dq\t{$src, $dst|$dst, $src}", []>;
940 let isAsmParserOnly = 1 in {
941 def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
942 "vcvttps2dq\t{$src, $dst|$dst, $src}",
944 (int_x86_sse2_cvttps2dq VR128:$src))]>,
945 XS, VEX, Requires<[HasAVX]>;
946 def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
947 "vcvttps2dq\t{$src, $dst|$dst, $src}",
948 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
949 (memop addr:$src)))]>,
950 XS, VEX, Requires<[HasAVX]>;
952 def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
953 "cvttps2dq\t{$src, $dst|$dst, $src}",
955 (int_x86_sse2_cvttps2dq VR128:$src))]>,
956 XS, Requires<[HasSSE2]>;
957 def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
958 "cvttps2dq\t{$src, $dst|$dst, $src}",
959 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
960 (memop addr:$src)))]>,
961 XS, Requires<[HasSSE2]>;
963 let isAsmParserOnly = 1 in {
964 def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
966 "cvttpd2dq\t{$src, $dst|$dst, $src}",
967 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
969 def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
971 "cvttpd2dq\t{$src, $dst|$dst, $src}",
972 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
973 (memop addr:$src)))]>, VEX;
975 def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
976 "cvttpd2dq\t{$src, $dst|$dst, $src}",
977 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
978 def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
979 "cvttpd2dq\t{$src, $dst|$dst, $src}",
980 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
981 (memop addr:$src)))]>;
983 let isAsmParserOnly = 1 in {
984 // The assembler can recognize rr 256-bit instructions by seeing a ymm
985 // register, but the same isn't true when using memory operands instead.
986 // Provide other assembly rr and rm forms to address this explicitly.
987 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
988 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
989 def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
990 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
993 def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
994 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
995 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
996 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
999 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1000 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
1001 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1002 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
1005 // Convert packed single to packed double
1006 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
1007 // SSE2 instructions without OpSize prefix
1008 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1009 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
1010 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1011 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
1012 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
1013 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
1014 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
1015 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
1017 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1018 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
1019 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1020 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
1022 let isAsmParserOnly = 1 in {
1023 def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1024 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1025 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1026 VEX, Requires<[HasAVX]>;
1027 def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1028 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1029 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1030 (load addr:$src)))]>,
1031 VEX, Requires<[HasAVX]>;
1033 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1034 "cvtps2pd\t{$src, $dst|$dst, $src}",
1035 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1036 TB, Requires<[HasSSE2]>;
1037 def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1038 "cvtps2pd\t{$src, $dst|$dst, $src}",
1039 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1040 (load addr:$src)))]>,
1041 TB, Requires<[HasSSE2]>;
1043 // Convert packed double to packed single
1044 let isAsmParserOnly = 1 in {
1045 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1046 // register, but the same isn't true when using memory operands instead.
1047 // Provide other assembly rr and rm forms to address this explicitly.
1048 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1049 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1050 def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1051 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1054 def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1055 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1056 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1057 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1060 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1061 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
1062 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1063 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
1065 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1066 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1067 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1068 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1071 let isAsmParserOnly = 1 in {
1072 def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1073 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1074 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1075 def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
1077 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1078 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1079 (memop addr:$src)))]>;
1081 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1082 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1083 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1084 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1085 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1086 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1087 (memop addr:$src)))]>;
1089 // AVX 256-bit register conversion intrinsics
1090 // FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
1091 // whenever possible to avoid declaring two versions of each one.
1092 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
1093 (VCVTDQ2PSYrr VR256:$src)>;
1094 def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
1095 (VCVTDQ2PSYrm addr:$src)>;
1097 def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
1098 (VCVTPD2PSYrr VR256:$src)>;
1099 def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
1100 (VCVTPD2PSYrm addr:$src)>;
1102 def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
1103 (VCVTPS2DQYrr VR256:$src)>;
1104 def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
1105 (VCVTPS2DQYrm addr:$src)>;
1107 def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
1108 (VCVTPS2PDYrr VR128:$src)>;
1109 def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
1110 (VCVTPS2PDYrm addr:$src)>;
1112 def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
1113 (VCVTTPD2DQYrr VR256:$src)>;
1114 def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
1115 (VCVTTPD2DQYrm addr:$src)>;
1117 def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
1118 (VCVTTPS2DQYrr VR256:$src)>;
1119 def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
1120 (VCVTTPS2DQYrm addr:$src)>;
1122 //===----------------------------------------------------------------------===//
1123 // SSE 1 & 2 - Compare Instructions
1124 //===----------------------------------------------------------------------===//
1126 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
1127 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
1128 string asm, string asm_alt> {
1129 def rr : SIi8<0xC2, MRMSrcReg,
1130 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
1133 def rm : SIi8<0xC2, MRMSrcMem,
1134 (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
1136 // Accept explicit immediate argument form instead of comparison code.
1137 let isAsmParserOnly = 1 in {
1138 def rr_alt : SIi8<0xC2, MRMSrcReg,
1139 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1142 def rm_alt : SIi8<0xC2, MRMSrcMem,
1143 (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
1148 let neverHasSideEffects = 1, isAsmParserOnly = 1 in {
1149 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
1150 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1151 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1153 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
1154 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1155 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1159 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1160 defm CMPSS : sse12_cmp_scalar<FR32, f32mem,
1161 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
1162 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}">, XS;
1163 defm CMPSD : sse12_cmp_scalar<FR64, f64mem,
1164 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
1165 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}">, XD;
1168 multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
1169 Intrinsic Int, string asm> {
1170 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1171 (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
1172 [(set VR128:$dst, (Int VR128:$src1,
1173 VR128:$src, imm:$cc))]>;
1174 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1175 (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
1176 [(set VR128:$dst, (Int VR128:$src1,
1177 (load addr:$src), imm:$cc))]>;
1180 // Aliases to match intrinsics which expect XMM operand(s).
1181 let isAsmParserOnly = 1 in {
1182 defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1183 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
1185 defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1186 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
1189 let Constraints = "$src1 = $dst" in {
1190 defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1191 "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
1192 defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1193 "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
1197 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
1198 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1199 ValueType vt, X86MemOperand x86memop,
1200 PatFrag ld_frag, string OpcodeStr, Domain d> {
1201 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1202 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1203 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
1204 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1205 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1206 [(set EFLAGS, (OpNode (vt RC:$src1),
1207 (ld_frag addr:$src2)))], d>;
1210 let Defs = [EFLAGS] in {
1211 let isAsmParserOnly = 1 in {
1212 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1213 "ucomiss", SSEPackedSingle>, VEX;
1214 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1215 "ucomisd", SSEPackedDouble>, OpSize, VEX;
1216 let Pattern = []<dag> in {
1217 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1218 "comiss", SSEPackedSingle>, VEX;
1219 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1220 "comisd", SSEPackedDouble>, OpSize, VEX;
1223 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1224 load, "ucomiss", SSEPackedSingle>, VEX;
1225 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1226 load, "ucomisd", SSEPackedDouble>, OpSize, VEX;
1228 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
1229 load, "comiss", SSEPackedSingle>, VEX;
1230 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
1231 load, "comisd", SSEPackedDouble>, OpSize, VEX;
1233 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1234 "ucomiss", SSEPackedSingle>, TB;
1235 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1236 "ucomisd", SSEPackedDouble>, TB, OpSize;
1238 let Pattern = []<dag> in {
1239 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1240 "comiss", SSEPackedSingle>, TB;
1241 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1242 "comisd", SSEPackedDouble>, TB, OpSize;
1245 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1246 load, "ucomiss", SSEPackedSingle>, TB;
1247 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1248 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
1250 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
1251 "comiss", SSEPackedSingle>, TB;
1252 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
1253 "comisd", SSEPackedDouble>, TB, OpSize;
1254 } // Defs = [EFLAGS]
1256 // sse12_cmp_packed - sse 1 & 2 compared packed instructions
1257 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1258 Intrinsic Int, string asm, string asm_alt,
1260 def rri : PIi8<0xC2, MRMSrcReg,
1261 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1262 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1263 def rmi : PIi8<0xC2, MRMSrcMem,
1264 (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
1265 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1266 // Accept explicit immediate argument form instead of comparison code.
1267 let isAsmParserOnly = 1 in {
1268 def rri_alt : PIi8<0xC2, MRMSrcReg,
1269 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1271 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1272 (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
1277 let isAsmParserOnly = 1 in {
1278 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1279 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1280 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1281 SSEPackedSingle>, VEX_4V;
1282 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1283 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1284 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1285 SSEPackedDouble>, OpSize, VEX_4V;
1286 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
1287 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1288 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1289 SSEPackedSingle>, VEX_4V;
1290 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
1291 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1292 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1293 SSEPackedDouble>, OpSize, VEX_4V;
1295 let Constraints = "$src1 = $dst" in {
1296 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1297 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1298 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1299 SSEPackedSingle>, TB;
1300 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1301 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1302 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1303 SSEPackedDouble>, TB, OpSize;
1306 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1307 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1308 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1309 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1310 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1311 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1312 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1313 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1315 //===----------------------------------------------------------------------===//
1316 // SSE 1 & 2 - Shuffle Instructions
1317 //===----------------------------------------------------------------------===//
1319 /// sse12_shuffle - sse 1 & 2 shuffle instructions
1320 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1321 ValueType vt, string asm, PatFrag mem_frag,
1322 Domain d, bit IsConvertibleToThreeAddress = 0> {
1323 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
1324 (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
1325 [(set RC:$dst, (vt (shufp:$src3
1326 RC:$src1, (mem_frag addr:$src2))))], d>;
1327 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1328 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
1329 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
1331 (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
1334 let isAsmParserOnly = 1 in {
1335 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1336 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1337 memopv4f32, SSEPackedSingle>, VEX_4V;
1338 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
1339 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1340 memopv8f32, SSEPackedSingle>, VEX_4V;
1341 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1342 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1343 memopv2f64, SSEPackedDouble>, OpSize, VEX_4V;
1344 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
1345 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1346 memopv4f64, SSEPackedDouble>, OpSize, VEX_4V;
1349 let Constraints = "$src1 = $dst" in {
1350 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1351 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1352 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1354 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1355 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1356 memopv2f64, SSEPackedDouble>, TB, OpSize;
1359 //===----------------------------------------------------------------------===//
1360 // SSE 1 & 2 - Unpack Instructions
1361 //===----------------------------------------------------------------------===//
1363 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1364 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1365 PatFrag mem_frag, RegisterClass RC,
1366 X86MemOperand x86memop, string asm,
1368 def rr : PI<opc, MRMSrcReg,
1369 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1371 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1372 def rm : PI<opc, MRMSrcMem,
1373 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1375 (vt (OpNode RC:$src1,
1376 (mem_frag addr:$src2))))], d>;
1379 let AddedComplexity = 10 in {
1380 let isAsmParserOnly = 1 in {
1381 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1382 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1383 SSEPackedSingle>, VEX_4V;
1384 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1385 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1386 SSEPackedDouble>, OpSize, VEX_4V;
1387 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1388 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1389 SSEPackedSingle>, VEX_4V;
1390 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1391 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1392 SSEPackedDouble>, OpSize, VEX_4V;
1394 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1395 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1396 SSEPackedSingle>, VEX_4V;
1397 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1398 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1399 SSEPackedDouble>, OpSize, VEX_4V;
1400 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1401 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1402 SSEPackedSingle>, VEX_4V;
1403 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1404 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1405 SSEPackedDouble>, OpSize, VEX_4V;
1408 let Constraints = "$src1 = $dst" in {
1409 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1410 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1411 SSEPackedSingle>, TB;
1412 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1413 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1414 SSEPackedDouble>, TB, OpSize;
1415 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1416 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1417 SSEPackedSingle>, TB;
1418 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1419 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1420 SSEPackedDouble>, TB, OpSize;
1421 } // Constraints = "$src1 = $dst"
1422 } // AddedComplexity
1424 //===----------------------------------------------------------------------===//
1425 // SSE 1 & 2 - Extract Floating-Point Sign mask
1426 //===----------------------------------------------------------------------===//
1428 /// sse12_extr_sign_mask - sse 1 & 2 unpack and interleave
1429 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1431 def rr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1432 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1433 [(set GR32:$dst, (Int RC:$src))], d>;
1437 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1438 SSEPackedSingle>, TB;
1439 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1440 SSEPackedDouble>, TB, OpSize;
1442 let isAsmParserOnly = 1 in {
1443 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1444 "movmskps", SSEPackedSingle>, VEX;
1445 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1446 "movmskpd", SSEPackedDouble>, OpSize,
1448 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
1449 "movmskps", SSEPackedSingle>, VEX;
1450 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
1451 "movmskpd", SSEPackedDouble>, OpSize,
1455 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1456 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1457 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1458 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1460 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1461 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1462 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1463 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1467 //===----------------------------------------------------------------------===//
1468 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1469 //===----------------------------------------------------------------------===//
1471 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1472 // names that start with 'Fs'.
1474 // Alias instructions that map fld0 to pxor for sse.
1475 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1476 canFoldAsLoad = 1 in {
1477 // FIXME: Set encoding to pseudo!
1478 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1479 [(set FR32:$dst, fp32imm0)]>,
1480 Requires<[HasSSE1]>, TB, OpSize;
1481 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1482 [(set FR64:$dst, fpimm0)]>,
1483 Requires<[HasSSE2]>, TB, OpSize;
1486 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
1487 // bits are disregarded.
1488 let neverHasSideEffects = 1 in {
1489 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1490 "movaps\t{$src, $dst|$dst, $src}", []>;
1491 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1492 "movapd\t{$src, $dst|$dst, $src}", []>;
1495 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1496 // bits are disregarded.
1497 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1498 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1499 "movaps\t{$src, $dst|$dst, $src}",
1500 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
1501 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1502 "movapd\t{$src, $dst|$dst, $src}",
1503 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1506 //===----------------------------------------------------------------------===//
1507 // SSE 1 & 2 - Logical Instructions
1508 //===----------------------------------------------------------------------===//
1510 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
1512 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
1514 let isAsmParserOnly = 1 in {
1515 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
1516 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, VEX_4V;
1518 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
1519 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, OpSize, VEX_4V;
1522 let Constraints = "$src1 = $dst" in {
1523 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
1524 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
1526 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
1527 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
1531 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1532 let mayLoad = 0 in {
1533 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
1534 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
1535 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
1538 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
1539 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
1541 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
1543 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
1544 SDNode OpNode, int HasPat = 0,
1545 list<list<dag>> Pattern = []> {
1546 let isAsmParserOnly = 1, Pattern = []<dag> in {
1547 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1548 !strconcat(OpcodeStr, "ps"), f128mem,
1549 !if(HasPat, Pattern[0], // rr
1550 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1552 !if(HasPat, Pattern[2], // rm
1553 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1554 (memopv2i64 addr:$src2)))]), 0>,
1557 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1558 !strconcat(OpcodeStr, "pd"), f128mem,
1559 !if(HasPat, Pattern[1], // rr
1560 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1563 !if(HasPat, Pattern[3], // rm
1564 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1565 (memopv2i64 addr:$src2)))]), 0>,
1568 let Constraints = "$src1 = $dst" in {
1569 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1570 !strconcat(OpcodeStr, "ps"), f128mem,
1571 !if(HasPat, Pattern[0], // rr
1572 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1574 !if(HasPat, Pattern[2], // rm
1575 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1576 (memopv2i64 addr:$src2)))])>, TB;
1578 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1579 !strconcat(OpcodeStr, "pd"), f128mem,
1580 !if(HasPat, Pattern[1], // rr
1581 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1584 !if(HasPat, Pattern[3], // rm
1585 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1586 (memopv2i64 addr:$src2)))])>,
1591 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
1593 let isAsmParserOnly = 1 in {
1594 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr> {
1595 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
1596 !strconcat(OpcodeStr, "ps"), f256mem, [], [], 0>, VEX_4V;
1598 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
1599 !strconcat(OpcodeStr, "pd"), f256mem, [], [], 0>, OpSize, VEX_4V;
1603 // AVX 256-bit packed logical ops forms
1604 defm VAND : sse12_fp_packed_logical_y<0x54, "and">;
1605 defm VOR : sse12_fp_packed_logical_y<0x56, "or">;
1606 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor">;
1607 let isCommutable = 0 in
1608 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn">;
1610 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
1611 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
1612 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
1613 let isCommutable = 0 in
1614 defm ANDN : sse12_fp_packed_logical<0x55, "andn", undef /* dummy */, 1, [
1616 [(set VR128:$dst, (v2i64 (and (xor VR128:$src1,
1617 (bc_v2i64 (v4i32 immAllOnesV))),
1620 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1621 (bc_v2i64 (v2f64 VR128:$src2))))],
1623 [(set VR128:$dst, (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
1624 (bc_v2i64 (v4i32 immAllOnesV))),
1625 (memopv2i64 addr:$src2))))],
1627 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1628 (memopv2i64 addr:$src2)))]]>;
1630 //===----------------------------------------------------------------------===//
1631 // SSE 1 & 2 - Arithmetic Instructions
1632 //===----------------------------------------------------------------------===//
1634 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
1637 /// In addition, we also have a special variant of the scalar form here to
1638 /// represent the associated intrinsic operation. This form is unlike the
1639 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1640 /// and leaves the top elements unmodified (therefore these cannot be commuted).
1642 /// These three forms can each be reg+reg or reg+mem.
1645 /// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
1647 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
1649 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
1650 OpNode, FR32, f32mem, Is2Addr>, XS;
1651 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
1652 OpNode, FR64, f64mem, Is2Addr>, XD;
1655 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
1657 let mayLoad = 0 in {
1658 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
1659 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
1660 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
1661 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
1665 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
1667 let mayLoad = 0 in {
1668 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
1669 v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
1670 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
1671 v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
1675 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
1677 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1678 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
1679 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1680 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
1683 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
1685 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1686 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
1687 SSEPackedSingle, Is2Addr>, TB;
1689 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1690 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
1691 SSEPackedDouble, Is2Addr>, TB, OpSize;
1694 multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
1695 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1696 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
1697 SSEPackedSingle, 0>, TB;
1699 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1700 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
1701 SSEPackedDouble, 0>, TB, OpSize;
1704 // Binary Arithmetic instructions
1705 let isAsmParserOnly = 1 in {
1706 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
1707 basic_sse12_fp_binop_s_int<0x58, "add", 0>,
1708 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
1709 basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
1710 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
1711 basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
1712 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
1713 basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
1715 let isCommutable = 0 in {
1716 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
1717 basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
1718 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
1719 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
1720 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
1721 basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
1722 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
1723 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
1724 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
1725 basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
1726 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
1727 basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
1728 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
1729 basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
1730 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
1731 basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
1732 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
1733 basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
1734 basic_sse12_fp_binop_p_y_int<0x5D, "min">,
1735 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
1739 let Constraints = "$src1 = $dst" in {
1740 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
1741 basic_sse12_fp_binop_p<0x58, "add", fadd>,
1742 basic_sse12_fp_binop_s_int<0x58, "add">;
1743 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
1744 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
1745 basic_sse12_fp_binop_s_int<0x59, "mul">;
1747 let isCommutable = 0 in {
1748 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
1749 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
1750 basic_sse12_fp_binop_s_int<0x5C, "sub">;
1751 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
1752 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
1753 basic_sse12_fp_binop_s_int<0x5E, "div">;
1754 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
1755 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
1756 basic_sse12_fp_binop_s_int<0x5F, "max">,
1757 basic_sse12_fp_binop_p_int<0x5F, "max">;
1758 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
1759 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
1760 basic_sse12_fp_binop_s_int<0x5D, "min">,
1761 basic_sse12_fp_binop_p_int<0x5D, "min">;
1766 /// In addition, we also have a special variant of the scalar form here to
1767 /// represent the associated intrinsic operation. This form is unlike the
1768 /// plain scalar form, in that it takes an entire vector (instead of a
1769 /// scalar) and leaves the top elements undefined.
1771 /// And, we have a special variant form for a full-vector intrinsic form.
1773 /// sse1_fp_unop_s - SSE1 unops in scalar form.
1774 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
1775 SDNode OpNode, Intrinsic F32Int> {
1776 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1777 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1778 [(set FR32:$dst, (OpNode FR32:$src))]>;
1779 // For scalar unary operations, fold a load into the operation
1780 // only in OptForSize mode. It eliminates an instruction, but it also
1781 // eliminates a whole-register clobber (the load), so it introduces a
1782 // partial register update condition.
1783 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
1784 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1785 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
1786 Requires<[HasSSE1, OptForSize]>;
1787 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1788 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1789 [(set VR128:$dst, (F32Int VR128:$src))]>;
1790 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1791 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1792 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1795 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
1796 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1797 SDNode OpNode, Intrinsic F32Int> {
1798 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
1799 !strconcat(OpcodeStr,
1800 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1801 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
1802 !strconcat(OpcodeStr,
1803 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1804 []>, XS, Requires<[HasAVX, OptForSize]>;
1805 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1806 !strconcat(OpcodeStr,
1807 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1808 [(set VR128:$dst, (F32Int VR128:$src))]>;
1809 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1810 !strconcat(OpcodeStr,
1811 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1812 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1815 /// sse1_fp_unop_p - SSE1 unops in packed form.
1816 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1817 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1818 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1819 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
1820 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1821 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1822 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
1825 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
1826 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1827 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1828 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1829 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
1830 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1831 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1832 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
1835 /// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
1836 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1837 Intrinsic V4F32Int> {
1838 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1839 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1840 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
1841 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1842 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1843 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
1846 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsics unops in packed forms.
1847 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1848 Intrinsic V4F32Int> {
1849 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1850 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1851 [(set VR256:$dst, (V4F32Int VR256:$src))]>;
1852 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1853 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1854 [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
1857 /// sse2_fp_unop_s - SSE2 unops in scalar form.
1858 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
1859 SDNode OpNode, Intrinsic F64Int> {
1860 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1861 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1862 [(set FR64:$dst, (OpNode FR64:$src))]>;
1863 // See the comments in sse1_fp_unop_s for why this is OptForSize.
1864 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1865 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1866 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
1867 Requires<[HasSSE2, OptForSize]>;
1868 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1869 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1870 [(set VR128:$dst, (F64Int VR128:$src))]>;
1871 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1872 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1873 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1876 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
1877 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1878 SDNode OpNode, Intrinsic F64Int> {
1879 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1880 !strconcat(OpcodeStr,
1881 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1882 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
1883 (ins FR64:$src1, f64mem:$src2),
1884 !strconcat(OpcodeStr,
1885 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1886 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1887 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1888 [(set VR128:$dst, (F64Int VR128:$src))]>;
1889 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1890 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1891 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1894 /// sse2_fp_unop_p - SSE2 unops in vector forms.
1895 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
1897 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1898 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1899 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
1900 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1901 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1902 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
1905 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
1906 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1907 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1908 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1909 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
1910 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1911 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1912 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
1915 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
1916 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1917 Intrinsic V2F64Int> {
1918 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1919 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1920 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
1921 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1922 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1923 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
1926 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
1927 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1928 Intrinsic V2F64Int> {
1929 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1930 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1931 [(set VR256:$dst, (V2F64Int VR256:$src))]>;
1932 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1933 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1934 [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
1937 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
1939 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse_sqrt_ss>,
1940 sse2_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1943 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
1944 sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
1945 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1946 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1947 sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
1948 sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
1949 sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
1950 sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
1953 // Reciprocal approximations. Note that these typically require refinement
1954 // in order to obtain suitable precision.
1955 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt", X86frsqrt,
1956 int_x86_sse_rsqrt_ss>, VEX_4V;
1957 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
1958 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
1959 sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
1960 sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
1962 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp", X86frcp, int_x86_sse_rcp_ss>,
1964 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
1965 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
1966 sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
1967 sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
1971 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
1972 sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
1973 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
1974 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1975 sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
1976 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
1978 // Reciprocal approximations. Note that these typically require refinement
1979 // in order to obtain suitable precision.
1980 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
1981 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
1982 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
1983 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
1984 sse1_fp_unop_p<0x53, "rcp", X86frcp>,
1985 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
1987 // There is no f64 version of the reciprocal approximation instructions.
1989 //===----------------------------------------------------------------------===//
1990 // SSE 1 & 2 - Non-temporal stores
1991 //===----------------------------------------------------------------------===//
1993 let isAsmParserOnly = 1 in {
1994 def VMOVNTPSmr_Int : VPSI<0x2B, MRMDestMem, (outs),
1995 (ins i128mem:$dst, VR128:$src),
1996 "movntps\t{$src, $dst|$dst, $src}",
1997 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>, VEX;
1998 def VMOVNTPDmr_Int : VPDI<0x2B, MRMDestMem, (outs),
1999 (ins i128mem:$dst, VR128:$src),
2000 "movntpd\t{$src, $dst|$dst, $src}",
2001 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>, VEX;
2003 let ExeDomain = SSEPackedInt in
2004 def VMOVNTDQmr_Int : VPDI<0xE7, MRMDestMem, (outs),
2005 (ins f128mem:$dst, VR128:$src),
2006 "movntdq\t{$src, $dst|$dst, $src}",
2007 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>, VEX;
2009 let AddedComplexity = 400 in { // Prefer non-temporal versions
2010 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
2011 (ins f128mem:$dst, VR128:$src),
2012 "movntps\t{$src, $dst|$dst, $src}",
2013 [(alignednontemporalstore (v4f32 VR128:$src),
2015 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
2016 (ins f128mem:$dst, VR128:$src),
2017 "movntpd\t{$src, $dst|$dst, $src}",
2018 [(alignednontemporalstore (v2f64 VR128:$src),
2020 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
2021 (ins f128mem:$dst, VR128:$src),
2022 "movntdq\t{$src, $dst|$dst, $src}",
2023 [(alignednontemporalstore (v2f64 VR128:$src),
2025 let ExeDomain = SSEPackedInt in
2026 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
2027 (ins f128mem:$dst, VR128:$src),
2028 "movntdq\t{$src, $dst|$dst, $src}",
2029 [(alignednontemporalstore (v4f32 VR128:$src),
2032 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
2033 (ins f256mem:$dst, VR256:$src),
2034 "movntps\t{$src, $dst|$dst, $src}",
2035 [(alignednontemporalstore (v8f32 VR256:$src),
2037 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
2038 (ins f256mem:$dst, VR256:$src),
2039 "movntpd\t{$src, $dst|$dst, $src}",
2040 [(alignednontemporalstore (v4f64 VR256:$src),
2042 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
2043 (ins f256mem:$dst, VR256:$src),
2044 "movntdq\t{$src, $dst|$dst, $src}",
2045 [(alignednontemporalstore (v4f64 VR256:$src),
2047 let ExeDomain = SSEPackedInt in
2048 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
2049 (ins f256mem:$dst, VR256:$src),
2050 "movntdq\t{$src, $dst|$dst, $src}",
2051 [(alignednontemporalstore (v8f32 VR256:$src),
2056 def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
2057 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
2058 def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
2059 (VMOVNTPDYmr addr:$dst, VR256:$src)>;
2060 def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
2061 (VMOVNTPSYmr addr:$dst, VR256:$src)>;
2063 def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2064 "movntps\t{$src, $dst|$dst, $src}",
2065 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
2066 def MOVNTPDmr_Int : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2067 "movntpd\t{$src, $dst|$dst, $src}",
2068 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
2070 let ExeDomain = SSEPackedInt in
2071 def MOVNTDQmr_Int : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2072 "movntdq\t{$src, $dst|$dst, $src}",
2073 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
2075 let AddedComplexity = 400 in { // Prefer non-temporal versions
2076 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2077 "movntps\t{$src, $dst|$dst, $src}",
2078 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2079 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2080 "movntpd\t{$src, $dst|$dst, $src}",
2081 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
2083 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2084 "movntdq\t{$src, $dst|$dst, $src}",
2085 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2087 let ExeDomain = SSEPackedInt in
2088 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2089 "movntdq\t{$src, $dst|$dst, $src}",
2090 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2092 // There is no AVX form for instructions below this point
2093 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2094 "movnti\t{$src, $dst|$dst, $src}",
2095 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2096 TB, Requires<[HasSSE2]>;
2098 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2099 "movnti\t{$src, $dst|$dst, $src}",
2100 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2101 TB, Requires<[HasSSE2]>;
2104 def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2105 "movnti\t{$src, $dst|$dst, $src}",
2106 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
2107 TB, Requires<[HasSSE2]>;
2109 //===----------------------------------------------------------------------===//
2110 // SSE 1 & 2 - Misc Instructions (No AVX form)
2111 //===----------------------------------------------------------------------===//
2113 // Prefetch intrinsic.
2114 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2115 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
2116 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2117 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
2118 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2119 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
2120 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2121 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
2123 // Load, store, and memory fence
2124 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2125 TB, Requires<[HasSSE1]>;
2126 def : Pat<(X86SFence), (SFENCE)>;
2128 // Alias instructions that map zero vector to pxor / xorp* for sse.
2129 // We set canFoldAsLoad because this can be converted to a constant-pool
2130 // load of an all-zeros value if folding it would be beneficial.
2131 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2132 // JIT implementatioan, it does not expand the instructions below like
2133 // X86MCInstLower does.
2134 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2135 isCodeGenOnly = 1 in {
2136 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2137 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
2138 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2139 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
2140 let ExeDomain = SSEPackedInt in
2141 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2142 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2145 // The same as done above but for AVX. The 128-bit versions are the
2146 // same, but re-encoded. The 256-bit does not support PI version.
2147 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2148 // JIT implementatioan, it does not expand the instructions below like
2149 // X86MCInstLower does.
2150 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2151 isCodeGenOnly = 1, Predicates = [HasAVX] in {
2152 def AVX_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2153 [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
2154 def AVX_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2155 [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
2156 def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2157 [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
2158 def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2159 [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
2160 let ExeDomain = SSEPackedInt in
2161 def AVX_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2162 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2165 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
2166 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
2167 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
2169 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2170 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
2172 //===----------------------------------------------------------------------===//
2173 // SSE 1 & 2 - Load/Store XCSR register
2174 //===----------------------------------------------------------------------===//
2176 let isAsmParserOnly = 1 in {
2177 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2178 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2179 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2180 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2183 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2184 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2185 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2186 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2188 //===---------------------------------------------------------------------===//
2189 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2190 //===---------------------------------------------------------------------===//
2192 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2194 let isAsmParserOnly = 1 in {
2195 let neverHasSideEffects = 1 in {
2196 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2197 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2198 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2199 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2201 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2202 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2203 def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2204 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2206 let canFoldAsLoad = 1, mayLoad = 1 in {
2207 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2208 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2209 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2210 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2211 let Predicates = [HasAVX] in {
2212 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2213 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2214 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2215 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2219 let mayStore = 1 in {
2220 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2221 (ins i128mem:$dst, VR128:$src),
2222 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2223 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
2224 (ins i256mem:$dst, VR256:$src),
2225 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2226 let Predicates = [HasAVX] in {
2227 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2228 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2229 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
2230 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2235 let neverHasSideEffects = 1 in
2236 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2237 "movdqa\t{$src, $dst|$dst, $src}", []>;
2239 let canFoldAsLoad = 1, mayLoad = 1 in {
2240 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2241 "movdqa\t{$src, $dst|$dst, $src}",
2242 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2243 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2244 "movdqu\t{$src, $dst|$dst, $src}",
2245 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2246 XS, Requires<[HasSSE2]>;
2249 let mayStore = 1 in {
2250 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2251 "movdqa\t{$src, $dst|$dst, $src}",
2252 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2253 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2254 "movdqu\t{$src, $dst|$dst, $src}",
2255 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2256 XS, Requires<[HasSSE2]>;
2259 // Intrinsic forms of MOVDQU load and store
2260 let isAsmParserOnly = 1 in {
2261 let canFoldAsLoad = 1 in
2262 def VMOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2263 "vmovdqu\t{$src, $dst|$dst, $src}",
2264 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2265 XS, VEX, Requires<[HasAVX]>;
2266 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2267 "vmovdqu\t{$src, $dst|$dst, $src}",
2268 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2269 XS, VEX, Requires<[HasAVX]>;
2272 let canFoldAsLoad = 1 in
2273 def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2274 "movdqu\t{$src, $dst|$dst, $src}",
2275 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2276 XS, Requires<[HasSSE2]>;
2277 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2278 "movdqu\t{$src, $dst|$dst, $src}",
2279 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2280 XS, Requires<[HasSSE2]>;
2282 } // ExeDomain = SSEPackedInt
2284 def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
2285 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
2286 (VMOVDQUYmr addr:$dst, VR256:$src)>;
2288 //===---------------------------------------------------------------------===//
2289 // SSE2 - Packed Integer Arithmetic Instructions
2290 //===---------------------------------------------------------------------===//
2292 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2294 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
2295 bit IsCommutable = 0, bit Is2Addr = 1> {
2296 let isCommutable = IsCommutable in
2297 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2298 (ins VR128:$src1, VR128:$src2),
2300 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2301 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2302 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2303 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2304 (ins VR128:$src1, i128mem:$src2),
2306 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2307 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2308 [(set VR128:$dst, (IntId VR128:$src1,
2309 (bitconvert (memopv2i64 addr:$src2))))]>;
2312 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
2313 string OpcodeStr, Intrinsic IntId,
2314 Intrinsic IntId2, bit Is2Addr = 1> {
2315 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2316 (ins VR128:$src1, VR128:$src2),
2318 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2319 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2320 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2321 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2322 (ins VR128:$src1, i128mem:$src2),
2324 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2325 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2326 [(set VR128:$dst, (IntId VR128:$src1,
2327 (bitconvert (memopv2i64 addr:$src2))))]>;
2328 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
2329 (ins VR128:$src1, i32i8imm:$src2),
2331 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2332 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2333 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
2336 /// PDI_binop_rm - Simple SSE2 binary operator.
2337 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2338 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
2339 let isCommutable = IsCommutable in
2340 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2341 (ins VR128:$src1, VR128:$src2),
2343 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2344 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2345 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
2346 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2347 (ins VR128:$src1, i128mem:$src2),
2349 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2350 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2351 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
2352 (bitconvert (memopv2i64 addr:$src2)))))]>;
2355 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
2357 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
2358 /// to collapse (bitconvert VT to VT) into its operand.
2360 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
2361 bit IsCommutable = 0, bit Is2Addr = 1> {
2362 let isCommutable = IsCommutable in
2363 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2364 (ins VR128:$src1, VR128:$src2),
2366 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2367 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2368 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
2369 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2370 (ins VR128:$src1, i128mem:$src2),
2372 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2373 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2374 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
2377 } // ExeDomain = SSEPackedInt
2379 // 128-bit Integer Arithmetic
2381 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2382 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
2383 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
2384 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
2385 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
2386 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
2387 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
2388 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
2389 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
2390 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
2393 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
2395 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
2397 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
2399 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
2401 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
2403 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
2405 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
2407 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
2409 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
2411 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
2413 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
2415 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
2417 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
2419 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
2421 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
2423 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
2425 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
2427 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
2429 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
2433 let Constraints = "$src1 = $dst" in {
2434 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
2435 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
2436 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
2437 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
2438 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
2439 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
2440 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
2441 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
2442 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
2445 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
2446 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
2447 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
2448 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
2449 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
2450 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
2451 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
2452 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
2453 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
2454 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
2455 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
2456 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
2457 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
2458 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
2459 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
2460 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
2461 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
2462 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
2463 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
2465 } // Constraints = "$src1 = $dst"
2467 //===---------------------------------------------------------------------===//
2468 // SSE2 - Packed Integer Logical Instructions
2469 //===---------------------------------------------------------------------===//
2471 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2472 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
2473 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
2475 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
2476 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
2478 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
2479 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
2482 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
2483 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
2485 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
2486 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
2488 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
2489 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
2492 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
2493 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
2495 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
2496 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
2499 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
2500 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
2501 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
2503 let ExeDomain = SSEPackedInt in {
2504 let neverHasSideEffects = 1 in {
2505 // 128-bit logical shifts.
2506 def VPSLLDQri : PDIi8<0x73, MRM7r,
2507 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2508 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2510 def VPSRLDQri : PDIi8<0x73, MRM3r,
2511 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2512 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2514 // PSRADQri doesn't exist in SSE[1-3].
2516 def VPANDNrr : PDI<0xDF, MRMSrcReg,
2517 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2518 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2519 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2520 VR128:$src2)))]>, VEX_4V;
2522 def VPANDNrm : PDI<0xDF, MRMSrcMem,
2523 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2524 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2525 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2526 (memopv2i64 addr:$src2))))]>,
2531 let Constraints = "$src1 = $dst" in {
2532 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
2533 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
2534 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
2535 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
2536 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
2537 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
2539 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
2540 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
2541 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
2542 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
2543 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
2544 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
2546 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
2547 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
2548 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
2549 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
2551 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
2552 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
2553 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
2555 let ExeDomain = SSEPackedInt in {
2556 let neverHasSideEffects = 1 in {
2557 // 128-bit logical shifts.
2558 def PSLLDQri : PDIi8<0x73, MRM7r,
2559 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2560 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
2561 def PSRLDQri : PDIi8<0x73, MRM3r,
2562 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2563 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
2564 // PSRADQri doesn't exist in SSE[1-3].
2566 def PANDNrr : PDI<0xDF, MRMSrcReg,
2567 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2568 "pandn\t{$src2, $dst|$dst, $src2}",
2569 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2572 def PANDNrm : PDI<0xDF, MRMSrcMem,
2573 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2574 "pandn\t{$src2, $dst|$dst, $src2}",
2575 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2576 (memopv2i64 addr:$src2))))]>;
2578 } // Constraints = "$src1 = $dst"
2580 let Predicates = [HasAVX] in {
2581 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2582 (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2583 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2584 (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2585 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2586 (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
2587 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2588 (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
2589 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2590 (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2592 // Shift up / down and insert zero's.
2593 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2594 (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2595 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2596 (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2599 let Predicates = [HasSSE2] in {
2600 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2601 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2602 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2603 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2604 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2605 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
2606 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2607 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
2608 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2609 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2611 // Shift up / down and insert zero's.
2612 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2613 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2614 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2615 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2618 //===---------------------------------------------------------------------===//
2619 // SSE2 - Packed Integer Comparison Instructions
2620 //===---------------------------------------------------------------------===//
2622 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2623 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
2625 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
2627 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
2629 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
2631 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
2633 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
2637 let Constraints = "$src1 = $dst" in {
2638 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
2639 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
2640 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
2641 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
2642 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
2643 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
2644 } // Constraints = "$src1 = $dst"
2646 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2647 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
2648 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2649 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2650 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2651 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2652 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2653 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2654 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2655 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2656 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2657 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2659 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2660 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2661 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2662 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2663 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2664 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2665 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2666 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2667 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2668 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2669 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2670 (PCMPGTDrm VR128:$src1, addr:$src2)>;
2672 //===---------------------------------------------------------------------===//
2673 // SSE2 - Packed Integer Pack Instructions
2674 //===---------------------------------------------------------------------===//
2676 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2677 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
2679 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
2681 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
2685 let Constraints = "$src1 = $dst" in {
2686 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2687 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2688 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2689 } // Constraints = "$src1 = $dst"
2691 //===---------------------------------------------------------------------===//
2692 // SSE2 - Packed Integer Shuffle Instructions
2693 //===---------------------------------------------------------------------===//
2695 let ExeDomain = SSEPackedInt in {
2696 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
2698 def ri : Ii8<0x70, MRMSrcReg,
2699 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2700 !strconcat(OpcodeStr,
2701 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2702 [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
2704 def mi : Ii8<0x70, MRMSrcMem,
2705 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2706 !strconcat(OpcodeStr,
2707 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2708 [(set VR128:$dst, (vt (pshuf_frag:$src2
2709 (bc_frag (memopv2i64 addr:$src1)),
2712 } // ExeDomain = SSEPackedInt
2714 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2715 let AddedComplexity = 5 in
2716 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
2719 // SSE2 with ImmT == Imm8 and XS prefix.
2720 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
2723 // SSE2 with ImmT == Imm8 and XD prefix.
2724 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
2728 let Predicates = [HasSSE2] in {
2729 let AddedComplexity = 5 in
2730 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
2732 // SSE2 with ImmT == Imm8 and XS prefix.
2733 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
2735 // SSE2 with ImmT == Imm8 and XD prefix.
2736 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
2739 //===---------------------------------------------------------------------===//
2740 // SSE2 - Packed Integer Unpack Instructions
2741 //===---------------------------------------------------------------------===//
2743 let ExeDomain = SSEPackedInt in {
2744 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
2745 PatFrag unp_frag, PatFrag bc_frag, bit Is2Addr = 1> {
2746 def rr : PDI<opc, MRMSrcReg,
2747 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2749 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2750 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2751 [(set VR128:$dst, (vt (unp_frag VR128:$src1, VR128:$src2)))]>;
2752 def rm : PDI<opc, MRMSrcMem,
2753 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2755 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2756 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2757 [(set VR128:$dst, (unp_frag VR128:$src1,
2758 (bc_frag (memopv2i64
2762 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2763 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, unpckl, bc_v16i8,
2765 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, unpckl, bc_v8i16,
2767 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, unpckl, bc_v4i32,
2770 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2771 /// knew to collapse (bitconvert VT to VT) into its operand.
2772 def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2773 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2774 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2776 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>, VEX_4V;
2777 def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2778 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2779 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2781 (v2i64 (unpckl VR128:$src1,
2782 (memopv2i64 addr:$src2))))]>, VEX_4V;
2784 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, unpckh, bc_v16i8,
2786 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, unpckh, bc_v8i16,
2788 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, unpckh, bc_v4i32,
2791 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2792 /// knew to collapse (bitconvert VT to VT) into its operand.
2793 def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2794 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2795 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2797 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>, VEX_4V;
2798 def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2799 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2800 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2802 (v2i64 (unpckh VR128:$src1,
2803 (memopv2i64 addr:$src2))))]>, VEX_4V;
2806 let Constraints = "$src1 = $dst" in {
2807 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, unpckl, bc_v16i8>;
2808 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, unpckl, bc_v8i16>;
2809 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, unpckl, bc_v4i32>;
2811 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2812 /// knew to collapse (bitconvert VT to VT) into its operand.
2813 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2814 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2815 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2817 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>;
2818 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2819 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2820 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2822 (v2i64 (unpckl VR128:$src1,
2823 (memopv2i64 addr:$src2))))]>;
2825 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, unpckh, bc_v16i8>;
2826 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, unpckh, bc_v8i16>;
2827 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, unpckh, bc_v4i32>;
2829 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2830 /// knew to collapse (bitconvert VT to VT) into its operand.
2831 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2832 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2833 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2835 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>;
2836 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2837 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2838 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2840 (v2i64 (unpckh VR128:$src1,
2841 (memopv2i64 addr:$src2))))]>;
2844 } // ExeDomain = SSEPackedInt
2846 //===---------------------------------------------------------------------===//
2847 // SSE2 - Packed Integer Extract and Insert
2848 //===---------------------------------------------------------------------===//
2850 let ExeDomain = SSEPackedInt in {
2851 multiclass sse2_pinsrw<bit Is2Addr = 1> {
2852 def rri : Ii8<0xC4, MRMSrcReg,
2853 (outs VR128:$dst), (ins VR128:$src1,
2854 GR32:$src2, i32i8imm:$src3),
2856 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2857 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2859 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
2860 def rmi : Ii8<0xC4, MRMSrcMem,
2861 (outs VR128:$dst), (ins VR128:$src1,
2862 i16mem:$src2, i32i8imm:$src3),
2864 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2865 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2867 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2872 let isAsmParserOnly = 1, Predicates = [HasAVX] in
2873 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
2874 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2875 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2876 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2877 imm:$src2))]>, OpSize, VEX;
2878 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2879 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2880 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2881 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2885 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2886 defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
2887 def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
2888 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
2889 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2890 []>, OpSize, VEX_4V;
2893 let Constraints = "$src1 = $dst" in
2894 defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
2896 } // ExeDomain = SSEPackedInt
2898 //===---------------------------------------------------------------------===//
2899 // SSE2 - Packed Mask Creation
2900 //===---------------------------------------------------------------------===//
2902 let ExeDomain = SSEPackedInt in {
2904 let isAsmParserOnly = 1 in {
2905 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2906 "pmovmskb\t{$src, $dst|$dst, $src}",
2907 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
2908 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2909 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
2911 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2912 "pmovmskb\t{$src, $dst|$dst, $src}",
2913 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2915 } // ExeDomain = SSEPackedInt
2917 //===---------------------------------------------------------------------===//
2918 // SSE2 - Conditional Store
2919 //===---------------------------------------------------------------------===//
2921 let ExeDomain = SSEPackedInt in {
2923 let isAsmParserOnly = 1 in {
2925 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
2926 (ins VR128:$src, VR128:$mask),
2927 "maskmovdqu\t{$mask, $src|$src, $mask}",
2928 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
2930 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
2931 (ins VR128:$src, VR128:$mask),
2932 "maskmovdqu\t{$mask, $src|$src, $mask}",
2933 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
2937 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2938 "maskmovdqu\t{$mask, $src|$src, $mask}",
2939 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
2941 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2942 "maskmovdqu\t{$mask, $src|$src, $mask}",
2943 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
2945 } // ExeDomain = SSEPackedInt
2947 //===---------------------------------------------------------------------===//
2948 // SSE2 - Move Doubleword
2949 //===---------------------------------------------------------------------===//
2951 // Move Int Doubleword to Packed Double Int
2952 let isAsmParserOnly = 1 in {
2953 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2954 "movd\t{$src, $dst|$dst, $src}",
2956 (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
2957 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2958 "movd\t{$src, $dst|$dst, $src}",
2960 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
2963 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2964 "movd\t{$src, $dst|$dst, $src}",
2966 (v4i32 (scalar_to_vector GR32:$src)))]>;
2967 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2968 "movd\t{$src, $dst|$dst, $src}",
2970 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2973 // Move Int Doubleword to Single Scalar
2974 let isAsmParserOnly = 1 in {
2975 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2976 "movd\t{$src, $dst|$dst, $src}",
2977 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
2979 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2980 "movd\t{$src, $dst|$dst, $src}",
2981 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
2984 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2985 "movd\t{$src, $dst|$dst, $src}",
2986 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2988 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2989 "movd\t{$src, $dst|$dst, $src}",
2990 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2992 // Move Packed Doubleword Int to Packed Double Int
2993 let isAsmParserOnly = 1 in {
2994 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2995 "movd\t{$src, $dst|$dst, $src}",
2996 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2998 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
2999 (ins i32mem:$dst, VR128:$src),
3000 "movd\t{$src, $dst|$dst, $src}",
3001 [(store (i32 (vector_extract (v4i32 VR128:$src),
3002 (iPTR 0))), addr:$dst)]>, VEX;
3004 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
3005 "movd\t{$src, $dst|$dst, $src}",
3006 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
3008 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
3009 "movd\t{$src, $dst|$dst, $src}",
3010 [(store (i32 (vector_extract (v4i32 VR128:$src),
3011 (iPTR 0))), addr:$dst)]>;
3013 // Move Scalar Single to Double Int
3014 let isAsmParserOnly = 1 in {
3015 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3016 "movd\t{$src, $dst|$dst, $src}",
3017 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
3018 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3019 "movd\t{$src, $dst|$dst, $src}",
3020 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
3022 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3023 "movd\t{$src, $dst|$dst, $src}",
3024 [(set GR32:$dst, (bitconvert FR32:$src))]>;
3025 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3026 "movd\t{$src, $dst|$dst, $src}",
3027 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
3029 // movd / movq to XMM register zero-extends
3030 let AddedComplexity = 15, isAsmParserOnly = 1 in {
3031 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3032 "movd\t{$src, $dst|$dst, $src}",
3033 [(set VR128:$dst, (v4i32 (X86vzmovl
3034 (v4i32 (scalar_to_vector GR32:$src)))))]>,
3036 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3037 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3038 [(set VR128:$dst, (v2i64 (X86vzmovl
3039 (v2i64 (scalar_to_vector GR64:$src)))))]>,
3042 let AddedComplexity = 15 in {
3043 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3044 "movd\t{$src, $dst|$dst, $src}",
3045 [(set VR128:$dst, (v4i32 (X86vzmovl
3046 (v4i32 (scalar_to_vector GR32:$src)))))]>;
3047 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3048 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3049 [(set VR128:$dst, (v2i64 (X86vzmovl
3050 (v2i64 (scalar_to_vector GR64:$src)))))]>;
3053 let AddedComplexity = 20 in {
3054 let isAsmParserOnly = 1 in
3055 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3056 "movd\t{$src, $dst|$dst, $src}",
3058 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3059 (loadi32 addr:$src))))))]>,
3061 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3062 "movd\t{$src, $dst|$dst, $src}",
3064 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3065 (loadi32 addr:$src))))))]>;
3067 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
3068 (MOVZDI2PDIrm addr:$src)>;
3069 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3070 (MOVZDI2PDIrm addr:$src)>;
3071 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3072 (MOVZDI2PDIrm addr:$src)>;
3075 //===---------------------------------------------------------------------===//
3076 // SSE2 - Move Quadword
3077 //===---------------------------------------------------------------------===//
3079 // Move Quadword Int to Packed Quadword Int
3080 let isAsmParserOnly = 1 in
3081 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3082 "vmovq\t{$src, $dst|$dst, $src}",
3084 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3085 VEX, Requires<[HasAVX]>;
3086 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3087 "movq\t{$src, $dst|$dst, $src}",
3089 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3090 Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
3092 // Move Packed Quadword Int to Quadword Int
3093 let isAsmParserOnly = 1 in
3094 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3095 "movq\t{$src, $dst|$dst, $src}",
3096 [(store (i64 (vector_extract (v2i64 VR128:$src),
3097 (iPTR 0))), addr:$dst)]>, VEX;
3098 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3099 "movq\t{$src, $dst|$dst, $src}",
3100 [(store (i64 (vector_extract (v2i64 VR128:$src),
3101 (iPTR 0))), addr:$dst)]>;
3103 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
3104 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
3106 // Store / copy lower 64-bits of a XMM register.
3107 let isAsmParserOnly = 1 in
3108 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3109 "movq\t{$src, $dst|$dst, $src}",
3110 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
3111 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3112 "movq\t{$src, $dst|$dst, $src}",
3113 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
3115 let AddedComplexity = 20, isAsmParserOnly = 1 in
3116 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3117 "vmovq\t{$src, $dst|$dst, $src}",
3119 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3120 (loadi64 addr:$src))))))]>,
3121 XS, VEX, Requires<[HasAVX]>;
3123 let AddedComplexity = 20 in {
3124 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3125 "movq\t{$src, $dst|$dst, $src}",
3127 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3128 (loadi64 addr:$src))))))]>,
3129 XS, Requires<[HasSSE2]>;
3131 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3132 (MOVZQI2PQIrm addr:$src)>;
3133 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
3134 (MOVZQI2PQIrm addr:$src)>;
3135 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
3138 // Moving from XMM to XMM and clear upper 64 bits. Note, there is a bug in
3139 // IA32 document. movq xmm1, xmm2 does clear the high bits.
3140 let isAsmParserOnly = 1, AddedComplexity = 15 in
3141 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3142 "vmovq\t{$src, $dst|$dst, $src}",
3143 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3144 XS, VEX, Requires<[HasAVX]>;
3145 let AddedComplexity = 15 in
3146 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3147 "movq\t{$src, $dst|$dst, $src}",
3148 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3149 XS, Requires<[HasSSE2]>;
3151 let AddedComplexity = 20, isAsmParserOnly = 1 in
3152 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3153 "vmovq\t{$src, $dst|$dst, $src}",
3154 [(set VR128:$dst, (v2i64 (X86vzmovl
3155 (loadv2i64 addr:$src))))]>,
3156 XS, VEX, Requires<[HasAVX]>;
3157 let AddedComplexity = 20 in {
3158 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3159 "movq\t{$src, $dst|$dst, $src}",
3160 [(set VR128:$dst, (v2i64 (X86vzmovl
3161 (loadv2i64 addr:$src))))]>,
3162 XS, Requires<[HasSSE2]>;
3164 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
3165 (MOVZPQILo2PQIrm addr:$src)>;
3168 // Instructions to match in the assembler
3169 let isAsmParserOnly = 1 in {
3170 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3171 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3172 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3173 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3174 // Recognize "movd" with GR64 destination, but encode as a "movq"
3175 def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3176 "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3179 // Instructions for the disassembler
3180 // xr = XMM register
3183 let isAsmParserOnly = 1, Predicates = [HasAVX] in
3184 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3185 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3186 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3187 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3189 //===---------------------------------------------------------------------===//
3190 // SSE2 - Misc Instructions
3191 //===---------------------------------------------------------------------===//
3194 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3195 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3196 TB, Requires<[HasSSE2]>;
3198 // Load, store, and memory fence
3199 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3200 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3201 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3202 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3203 def : Pat<(X86LFence), (LFENCE)>;
3204 def : Pat<(X86MFence), (MFENCE)>;
3207 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3208 // was introduced with SSE2, it's backward compatible.
3209 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
3211 // Alias instructions that map zero vector to pxor / xorp* for sse.
3212 // We set canFoldAsLoad because this can be converted to a constant-pool
3213 // load of an all-ones value if folding it would be beneficial.
3214 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3215 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3216 // FIXME: Change encoding to pseudo.
3217 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3218 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
3220 //===---------------------------------------------------------------------===//
3221 // SSE3 - Conversion Instructions
3222 //===---------------------------------------------------------------------===//
3224 // Convert Packed Double FP to Packed DW Integers
3225 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3226 // The assembler can recognize rr 256-bit instructions by seeing a ymm
3227 // register, but the same isn't true when using memory operands instead.
3228 // Provide other assembly rr and rm forms to address this explicitly.
3229 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3230 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3231 def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3232 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3235 def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3236 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3237 def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3238 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3241 def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3242 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
3243 def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
3244 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
3247 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3248 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3249 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3250 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3252 // Convert Packed DW Integers to Packed Double FP
3253 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3254 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3255 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3256 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3257 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3258 def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
3259 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3260 def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
3261 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3264 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3265 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3266 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3267 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3269 // AVX 256-bit register conversion intrinsics
3270 def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
3271 (VCVTDQ2PDYrr VR128:$src)>;
3272 def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
3273 (VCVTDQ2PDYrm addr:$src)>;
3275 def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
3276 (VCVTPD2DQYrr VR256:$src)>;
3277 def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
3278 (VCVTPD2DQYrm addr:$src)>;
3280 //===---------------------------------------------------------------------===//
3281 // SSE3 - Move Instructions
3282 //===---------------------------------------------------------------------===//
3284 // Replicate Single FP
3285 multiclass sse3_replicate_sfp<bits<8> op, PatFrag rep_frag, string OpcodeStr> {
3286 def rr : S3SI<op, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3287 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3288 [(set VR128:$dst, (v4f32 (rep_frag
3289 VR128:$src, (undef))))]>;
3290 def rm : S3SI<op, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3291 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3292 [(set VR128:$dst, (rep_frag
3293 (memopv4f32 addr:$src), (undef)))]>;
3296 multiclass sse3_replicate_sfp_y<bits<8> op, PatFrag rep_frag,
3298 def rr : S3SI<op, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3299 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3300 def rm : S3SI<op, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3301 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3304 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3305 // FIXME: Merge above classes when we have patterns for the ymm version
3306 defm VMOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "vmovshdup">, VEX;
3307 defm VMOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "vmovsldup">, VEX;
3308 defm VMOVSHDUPY : sse3_replicate_sfp_y<0x16, movshdup, "vmovshdup">, VEX;
3309 defm VMOVSLDUPY : sse3_replicate_sfp_y<0x12, movsldup, "vmovsldup">, VEX;
3311 defm MOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "movshdup">;
3312 defm MOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "movsldup">;
3314 // Replicate Double FP
3315 multiclass sse3_replicate_dfp<string OpcodeStr> {
3316 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3317 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3318 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
3319 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
3320 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3322 (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
3326 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
3327 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3328 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3330 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3331 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3335 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3336 // FIXME: Merge above classes when we have patterns for the ymm version
3337 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
3338 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
3340 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
3342 // Move Unaligned Integer
3343 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3344 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3345 "vlddqu\t{$src, $dst|$dst, $src}",
3346 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
3347 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3348 "vlddqu\t{$src, $dst|$dst, $src}",
3349 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
3351 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3352 "lddqu\t{$src, $dst|$dst, $src}",
3353 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
3355 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
3357 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3359 // Several Move patterns
3360 let AddedComplexity = 5 in {
3361 def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
3362 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3363 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3364 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3365 def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
3366 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3367 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
3368 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3371 // vector_shuffle v1, <undef> <1, 1, 3, 3>
3372 let AddedComplexity = 15 in
3373 def : Pat<(v4i32 (movshdup VR128:$src, (undef))),
3374 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3375 let AddedComplexity = 20 in
3376 def : Pat<(v4i32 (movshdup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3377 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
3379 // vector_shuffle v1, <undef> <0, 0, 2, 2>
3380 let AddedComplexity = 15 in
3381 def : Pat<(v4i32 (movsldup VR128:$src, (undef))),
3382 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3383 let AddedComplexity = 20 in
3384 def : Pat<(v4i32 (movsldup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3385 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
3387 //===---------------------------------------------------------------------===//
3388 // SSE3 - Arithmetic
3389 //===---------------------------------------------------------------------===//
3391 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
3392 X86MemOperand x86memop, bit Is2Addr = 1> {
3393 def rr : I<0xD0, MRMSrcReg,
3394 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3396 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3397 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3398 [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
3399 def rm : I<0xD0, MRMSrcMem,
3400 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3402 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3403 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3404 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
3407 let isAsmParserOnly = 1, Predicates = [HasAVX],
3408 ExeDomain = SSEPackedDouble in {
3409 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
3410 f128mem, 0>, XD, VEX_4V;
3411 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
3412 f128mem, 0>, OpSize, VEX_4V;
3413 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
3414 f256mem, 0>, XD, VEX_4V;
3415 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
3416 f256mem, 0>, OpSize, VEX_4V;
3418 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
3419 ExeDomain = SSEPackedDouble in {
3420 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
3422 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
3423 f128mem>, TB, OpSize;
3426 //===---------------------------------------------------------------------===//
3427 // SSE3 Instructions
3428 //===---------------------------------------------------------------------===//
3431 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3432 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3433 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3435 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3436 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3437 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3439 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3441 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3442 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3443 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3445 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3446 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3447 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3449 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3450 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3451 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3453 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3455 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3456 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3457 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3460 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3461 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
3462 int_x86_sse3_hadd_ps, 0>, VEX_4V;
3463 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
3464 int_x86_sse3_hadd_pd, 0>, VEX_4V;
3465 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
3466 int_x86_sse3_hsub_ps, 0>, VEX_4V;
3467 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
3468 int_x86_sse3_hsub_pd, 0>, VEX_4V;
3469 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
3470 int_x86_avx_hadd_ps_256, 0>, VEX_4V;
3471 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
3472 int_x86_avx_hadd_pd_256, 0>, VEX_4V;
3473 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
3474 int_x86_avx_hsub_ps_256, 0>, VEX_4V;
3475 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
3476 int_x86_avx_hsub_pd_256, 0>, VEX_4V;
3479 let Constraints = "$src1 = $dst" in {
3480 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
3481 int_x86_sse3_hadd_ps>;
3482 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
3483 int_x86_sse3_hadd_pd>;
3484 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
3485 int_x86_sse3_hsub_ps>;
3486 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
3487 int_x86_sse3_hsub_pd>;
3490 //===---------------------------------------------------------------------===//
3491 // SSSE3 - Packed Absolute Instructions
3492 //===---------------------------------------------------------------------===//
3495 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
3496 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
3497 PatFrag mem_frag128, Intrinsic IntId128> {
3498 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3500 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3501 [(set VR128:$dst, (IntId128 VR128:$src))]>,
3504 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3506 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3509 (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
3512 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3513 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
3514 int_x86_ssse3_pabs_b_128>, VEX;
3515 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
3516 int_x86_ssse3_pabs_w_128>, VEX;
3517 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
3518 int_x86_ssse3_pabs_d_128>, VEX;
3521 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
3522 int_x86_ssse3_pabs_b_128>;
3523 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
3524 int_x86_ssse3_pabs_w_128>;
3525 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
3526 int_x86_ssse3_pabs_d_128>;
3528 //===---------------------------------------------------------------------===//
3529 // SSSE3 - Packed Binary Operator Instructions
3530 //===---------------------------------------------------------------------===//
3532 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
3533 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
3534 PatFrag mem_frag128, Intrinsic IntId128,
3536 let isCommutable = 1 in
3537 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3538 (ins VR128:$src1, VR128:$src2),
3540 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3541 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3542 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3544 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3545 (ins VR128:$src1, i128mem:$src2),
3547 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3548 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3550 (IntId128 VR128:$src1,
3551 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
3554 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3555 let isCommutable = 0 in {
3556 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
3557 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
3558 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
3559 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
3560 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
3561 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
3562 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
3563 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
3564 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
3565 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
3566 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
3567 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
3568 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
3569 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
3570 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
3571 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
3572 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
3573 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
3574 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
3575 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
3576 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
3577 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
3579 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
3580 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
3583 // None of these have i8 immediate fields.
3584 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
3585 let isCommutable = 0 in {
3586 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
3587 int_x86_ssse3_phadd_w_128>;
3588 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
3589 int_x86_ssse3_phadd_d_128>;
3590 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
3591 int_x86_ssse3_phadd_sw_128>;
3592 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
3593 int_x86_ssse3_phsub_w_128>;
3594 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
3595 int_x86_ssse3_phsub_d_128>;
3596 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
3597 int_x86_ssse3_phsub_sw_128>;
3598 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
3599 int_x86_ssse3_pmadd_ub_sw_128>;
3600 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv8i8,
3601 int_x86_ssse3_pshuf_b_128>;
3602 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
3603 int_x86_ssse3_psign_b_128>;
3604 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
3605 int_x86_ssse3_psign_w_128>;
3606 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
3607 int_x86_ssse3_psign_d_128>;
3609 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
3610 int_x86_ssse3_pmul_hr_sw_128>;
3613 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
3614 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
3615 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
3616 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
3618 //===---------------------------------------------------------------------===//
3619 // SSSE3 - Packed Align Instruction Patterns
3620 //===---------------------------------------------------------------------===//
3622 multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
3623 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
3624 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
3626 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3628 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3630 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
3631 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
3633 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3635 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3639 let isAsmParserOnly = 1, Predicates = [HasAVX] in
3640 defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
3641 let Constraints = "$src1 = $dst" in
3642 defm PALIGN : ssse3_palign<"palignr">;
3644 let AddedComplexity = 5 in {
3645 def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
3646 (PALIGNR128rr VR128:$src2, VR128:$src1,
3647 (SHUFFLE_get_palign_imm VR128:$src3))>,
3648 Requires<[HasSSSE3]>;
3649 def : Pat<(v4f32 (palign:$src3 VR128:$src1, VR128:$src2)),
3650 (PALIGNR128rr VR128:$src2, VR128:$src1,
3651 (SHUFFLE_get_palign_imm VR128:$src3))>,
3652 Requires<[HasSSSE3]>;
3653 def : Pat<(v8i16 (palign:$src3 VR128:$src1, VR128:$src2)),
3654 (PALIGNR128rr VR128:$src2, VR128:$src1,
3655 (SHUFFLE_get_palign_imm VR128:$src3))>,
3656 Requires<[HasSSSE3]>;
3657 def : Pat<(v16i8 (palign:$src3 VR128:$src1, VR128:$src2)),
3658 (PALIGNR128rr VR128:$src2, VR128:$src1,
3659 (SHUFFLE_get_palign_imm VR128:$src3))>,
3660 Requires<[HasSSSE3]>;
3663 //===---------------------------------------------------------------------===//
3664 // SSSE3 Misc Instructions
3665 //===---------------------------------------------------------------------===//
3667 // Thread synchronization
3668 def MONITOR : I<0x01, MRM_C8, (outs), (ins), "monitor",
3669 [(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
3670 def MWAIT : I<0x01, MRM_C9, (outs), (ins), "mwait",
3671 [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
3673 //===---------------------------------------------------------------------===//
3674 // Non-Instruction Patterns
3675 //===---------------------------------------------------------------------===//
3677 // extload f32 -> f64. This matches load+fextend because we have a hack in
3678 // the isel (PreprocessForFPConvert) that can introduce loads after dag
3680 // Since these loads aren't folded into the fextend, we have to match it
3682 let Predicates = [HasSSE2] in
3683 def : Pat<(fextend (loadf32 addr:$src)),
3684 (CVTSS2SDrm addr:$src)>;
3687 let Predicates = [HasSSE2] in {
3688 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
3689 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
3690 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
3691 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
3692 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
3693 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
3694 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
3695 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
3696 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
3697 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
3698 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
3699 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
3700 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
3701 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
3702 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
3703 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
3704 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
3705 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
3706 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
3707 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
3708 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
3709 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
3710 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
3711 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
3712 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
3713 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
3714 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
3715 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
3716 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
3717 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
3720 // Move scalar to XMM zero-extended
3721 // movd to XMM register zero-extends
3722 let AddedComplexity = 15 in {
3723 // Zeroing a VR128 then do a MOVS{S|D} to the lower bits.
3724 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
3725 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
3726 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
3727 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
3728 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
3729 (MOVSSrr (v4f32 (V_SET0PS)),
3730 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
3731 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
3732 (MOVSSrr (v4i32 (V_SET0PI)),
3733 (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
3736 // Splat v2f64 / v2i64
3737 let AddedComplexity = 10 in {
3738 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
3739 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3740 def : Pat<(unpckh (v2f64 VR128:$src), (undef)),
3741 (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3742 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
3743 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3744 def : Pat<(unpckh (v2i64 VR128:$src), (undef)),
3745 (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3748 // Special unary SHUFPSrri case.
3749 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
3750 (SHUFPSrri VR128:$src1, VR128:$src1,
3751 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3752 let AddedComplexity = 5 in
3753 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3754 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3755 Requires<[HasSSE2]>;
3756 // Special unary SHUFPDrri case.
3757 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
3758 (SHUFPDrri VR128:$src1, VR128:$src1,
3759 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3760 Requires<[HasSSE2]>;
3761 // Special unary SHUFPDrri case.
3762 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
3763 (SHUFPDrri VR128:$src1, VR128:$src1,
3764 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3765 Requires<[HasSSE2]>;
3766 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3767 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3768 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3769 Requires<[HasSSE2]>;
3771 // Special binary v4i32 shuffle cases with SHUFPS.
3772 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
3773 (SHUFPSrri VR128:$src1, VR128:$src2,
3774 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3775 Requires<[HasSSE2]>;
3776 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)))),
3777 (SHUFPSrmi VR128:$src1, addr:$src2,
3778 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3779 Requires<[HasSSE2]>;
3780 // Special binary v2i64 shuffle cases using SHUFPDrri.
3781 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
3782 (SHUFPDrri VR128:$src1, VR128:$src2,
3783 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3784 Requires<[HasSSE2]>;
3786 // vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
3787 let AddedComplexity = 15 in {
3788 def : Pat<(v4i32 (unpckl_undef:$src2 VR128:$src, (undef))),
3789 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3790 Requires<[OptForSpeed, HasSSE2]>;
3791 def : Pat<(v4f32 (unpckl_undef:$src2 VR128:$src, (undef))),
3792 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3793 Requires<[OptForSpeed, HasSSE2]>;
3795 let AddedComplexity = 10 in {
3796 def : Pat<(v4f32 (unpckl_undef VR128:$src, (undef))),
3797 (UNPCKLPSrr VR128:$src, VR128:$src)>;
3798 def : Pat<(v16i8 (unpckl_undef VR128:$src, (undef))),
3799 (PUNPCKLBWrr VR128:$src, VR128:$src)>;
3800 def : Pat<(v8i16 (unpckl_undef VR128:$src, (undef))),
3801 (PUNPCKLWDrr VR128:$src, VR128:$src)>;
3802 def : Pat<(v4i32 (unpckl_undef VR128:$src, (undef))),
3803 (PUNPCKLDQrr VR128:$src, VR128:$src)>;
3806 // vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
3807 let AddedComplexity = 15 in {
3808 def : Pat<(v4i32 (unpckh_undef:$src2 VR128:$src, (undef))),
3809 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3810 Requires<[OptForSpeed, HasSSE2]>;
3811 def : Pat<(v4f32 (unpckh_undef:$src2 VR128:$src, (undef))),
3812 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3813 Requires<[OptForSpeed, HasSSE2]>;
3815 let AddedComplexity = 10 in {
3816 def : Pat<(v4f32 (unpckh_undef VR128:$src, (undef))),
3817 (UNPCKHPSrr VR128:$src, VR128:$src)>;
3818 def : Pat<(v16i8 (unpckh_undef VR128:$src, (undef))),
3819 (PUNPCKHBWrr VR128:$src, VR128:$src)>;
3820 def : Pat<(v8i16 (unpckh_undef VR128:$src, (undef))),
3821 (PUNPCKHWDrr VR128:$src, VR128:$src)>;
3822 def : Pat<(v4i32 (unpckh_undef VR128:$src, (undef))),
3823 (PUNPCKHDQrr VR128:$src, VR128:$src)>;
3826 let AddedComplexity = 20 in {
3827 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
3828 def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
3829 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
3831 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
3832 def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
3833 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
3835 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
3836 def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
3837 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3838 def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
3839 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3842 let AddedComplexity = 20 in {
3843 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
3844 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
3845 (MOVLPSrm VR128:$src1, addr:$src2)>;
3846 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
3847 (MOVLPDrm VR128:$src1, addr:$src2)>;
3848 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
3849 (MOVLPSrm VR128:$src1, addr:$src2)>;
3850 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
3851 (MOVLPDrm VR128:$src1, addr:$src2)>;
3854 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
3855 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3856 (MOVLPSmr addr:$src1, VR128:$src2)>;
3857 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3858 (MOVLPDmr addr:$src1, VR128:$src2)>;
3859 def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
3861 (MOVLPSmr addr:$src1, VR128:$src2)>;
3862 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3863 (MOVLPDmr addr:$src1, VR128:$src2)>;
3865 let AddedComplexity = 15 in {
3866 // Setting the lowest element in the vector.
3867 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
3868 (MOVSSrr (v4i32 VR128:$src1),
3869 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
3870 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
3871 (MOVSDrr (v2i64 VR128:$src1),
3872 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
3874 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
3875 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
3876 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3877 Requires<[HasSSE2]>;
3878 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
3879 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3880 Requires<[HasSSE2]>;
3883 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
3884 // fall back to this for SSE1)
3885 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
3886 (SHUFPSrri VR128:$src2, VR128:$src1,
3887 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3889 // Set lowest element and zero upper elements.
3890 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
3891 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
3893 // Some special case pandn patterns.
3894 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3896 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3897 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3899 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3900 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3902 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3904 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3905 (memop addr:$src2))),
3906 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3907 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3908 (memop addr:$src2))),
3909 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3910 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3911 (memop addr:$src2))),
3912 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3914 // vector -> vector casts
3915 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
3916 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
3917 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
3918 (Int_CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
3920 // Use movaps / movups for SSE integer load / store (one byte shorter).
3921 let Predicates = [HasSSE1] in {
3922 def : Pat<(alignedloadv4i32 addr:$src),
3923 (MOVAPSrm addr:$src)>;
3924 def : Pat<(loadv4i32 addr:$src),
3925 (MOVUPSrm addr:$src)>;
3926 def : Pat<(alignedloadv2i64 addr:$src),
3927 (MOVAPSrm addr:$src)>;
3928 def : Pat<(loadv2i64 addr:$src),
3929 (MOVUPSrm addr:$src)>;
3931 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3932 (MOVAPSmr addr:$dst, VR128:$src)>;
3933 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3934 (MOVAPSmr addr:$dst, VR128:$src)>;
3935 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3936 (MOVAPSmr addr:$dst, VR128:$src)>;
3937 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3938 (MOVAPSmr addr:$dst, VR128:$src)>;
3939 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3940 (MOVUPSmr addr:$dst, VR128:$src)>;
3941 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3942 (MOVUPSmr addr:$dst, VR128:$src)>;
3943 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3944 (MOVUPSmr addr:$dst, VR128:$src)>;
3945 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3946 (MOVUPSmr addr:$dst, VR128:$src)>;
3949 // Use vmovaps/vmovups for AVX 128-bit integer load/store (one byte shorter).
3950 let Predicates = [HasAVX] in {
3951 def : Pat<(alignedloadv4i32 addr:$src),
3952 (VMOVAPSrm addr:$src)>;
3953 def : Pat<(loadv4i32 addr:$src),
3954 (VMOVUPSrm addr:$src)>;
3955 def : Pat<(alignedloadv2i64 addr:$src),
3956 (VMOVAPSrm addr:$src)>;
3957 def : Pat<(loadv2i64 addr:$src),
3958 (VMOVUPSrm addr:$src)>;
3960 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3961 (VMOVAPSmr addr:$dst, VR128:$src)>;
3962 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3963 (VMOVAPSmr addr:$dst, VR128:$src)>;
3964 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3965 (VMOVAPSmr addr:$dst, VR128:$src)>;
3966 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3967 (VMOVAPSmr addr:$dst, VR128:$src)>;
3968 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3969 (VMOVUPSmr addr:$dst, VR128:$src)>;
3970 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3971 (VMOVUPSmr addr:$dst, VR128:$src)>;
3972 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3973 (VMOVUPSmr addr:$dst, VR128:$src)>;
3974 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3975 (VMOVUPSmr addr:$dst, VR128:$src)>;
3978 //===----------------------------------------------------------------------===//
3979 // SSE4.1 - Packed Move with Sign/Zero Extend
3980 //===----------------------------------------------------------------------===//
3982 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3983 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3984 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3985 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3987 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3988 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3990 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
3994 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3995 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
3997 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
3999 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
4001 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
4003 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
4005 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
4009 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
4010 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
4011 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
4012 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
4013 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
4014 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
4016 // Common patterns involving scalar load.
4017 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
4018 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4019 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
4020 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4022 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
4023 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4024 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
4025 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4027 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
4028 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4029 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
4030 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4032 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
4033 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4034 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
4035 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4037 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
4038 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4039 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
4040 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4042 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
4043 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4044 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
4045 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4048 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4049 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4050 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4051 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4053 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4054 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4056 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
4060 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4061 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
4063 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
4065 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
4067 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
4071 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
4072 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
4073 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
4074 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
4076 // Common patterns involving scalar load
4077 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
4078 (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
4079 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
4080 (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
4082 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
4083 (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
4084 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
4085 (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
4088 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4089 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4090 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4091 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4093 // Expecting a i16 load any extended to i32 value.
4094 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
4095 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4096 [(set VR128:$dst, (IntId (bitconvert
4097 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
4101 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4102 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
4104 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
4107 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
4108 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
4110 // Common patterns involving scalar load
4111 def : Pat<(int_x86_sse41_pmovsxbq
4112 (bitconvert (v4i32 (X86vzmovl
4113 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4114 (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
4116 def : Pat<(int_x86_sse41_pmovzxbq
4117 (bitconvert (v4i32 (X86vzmovl
4118 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4119 (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
4121 //===----------------------------------------------------------------------===//
4122 // SSE4.1 - Extract Instructions
4123 //===----------------------------------------------------------------------===//
4125 /// SS41I_binop_ext8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
4126 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
4127 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4128 (ins VR128:$src1, i32i8imm:$src2),
4129 !strconcat(OpcodeStr,
4130 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4131 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
4133 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4134 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
4135 !strconcat(OpcodeStr,
4136 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4139 // There's an AssertZext in the way of writing the store pattern
4140 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4143 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4144 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
4145 def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
4146 (ins VR128:$src1, i32i8imm:$src2),
4147 "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
4150 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
4153 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
4154 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
4155 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4156 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
4157 !strconcat(OpcodeStr,
4158 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4161 // There's an AssertZext in the way of writing the store pattern
4162 // (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4165 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4166 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
4168 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
4171 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4172 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
4173 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4174 (ins VR128:$src1, i32i8imm:$src2),
4175 !strconcat(OpcodeStr,
4176 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4178 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
4179 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4180 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
4181 !strconcat(OpcodeStr,
4182 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4183 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
4184 addr:$dst)]>, OpSize;
4187 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4188 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
4190 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
4192 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4193 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
4194 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
4195 (ins VR128:$src1, i32i8imm:$src2),
4196 !strconcat(OpcodeStr,
4197 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4199 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
4200 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4201 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
4202 !strconcat(OpcodeStr,
4203 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4204 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
4205 addr:$dst)]>, OpSize, REX_W;
4208 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4209 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
4211 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
4213 /// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
4215 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
4216 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4217 (ins VR128:$src1, i32i8imm:$src2),
4218 !strconcat(OpcodeStr,
4219 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4221 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
4223 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4224 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
4225 !strconcat(OpcodeStr,
4226 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4227 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
4228 addr:$dst)]>, OpSize;
4231 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4232 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
4233 def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
4234 (ins VR128:$src1, i32i8imm:$src2),
4235 "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
4238 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
4240 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
4241 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
4244 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
4245 Requires<[HasSSE41]>;
4247 //===----------------------------------------------------------------------===//
4248 // SSE4.1 - Insert Instructions
4249 //===----------------------------------------------------------------------===//
4251 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
4252 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4253 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4255 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4257 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4259 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
4260 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4261 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
4263 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4265 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4267 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
4268 imm:$src3))]>, OpSize;
4271 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4272 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
4273 let Constraints = "$src1 = $dst" in
4274 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
4276 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
4277 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4278 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4280 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4282 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4284 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
4286 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4287 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
4289 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4291 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4293 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
4294 imm:$src3)))]>, OpSize;
4297 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4298 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
4299 let Constraints = "$src1 = $dst" in
4300 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
4302 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
4303 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4304 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4306 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4308 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4310 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
4312 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4313 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
4315 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4317 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4319 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
4320 imm:$src3)))]>, OpSize;
4323 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4324 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
4325 let Constraints = "$src1 = $dst" in
4326 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
4328 // insertps has a few different modes, there's the first two here below which
4329 // are optimized inserts that won't zero arbitrary elements in the destination
4330 // vector. The next one matches the intrinsic and could zero arbitrary elements
4331 // in the target vector.
4332 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
4333 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4334 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4336 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4338 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4340 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
4342 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4343 (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
4345 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4347 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4349 (X86insrtps VR128:$src1,
4350 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
4351 imm:$src3))]>, OpSize;
4354 let Constraints = "$src1 = $dst" in
4355 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
4356 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4357 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
4359 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4360 (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4362 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4363 (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4364 Requires<[HasSSE41]>;
4366 //===----------------------------------------------------------------------===//
4367 // SSE4.1 - Round Instructions
4368 //===----------------------------------------------------------------------===//
4370 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
4371 X86MemOperand x86memop, RegisterClass RC,
4372 PatFrag mem_frag32, PatFrag mem_frag64,
4373 Intrinsic V4F32Int, Intrinsic V2F64Int> {
4374 // Intrinsic operation, reg.
4375 // Vector intrinsic operation, reg
4376 def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
4377 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4378 !strconcat(OpcodeStr,
4379 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4380 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
4383 // Vector intrinsic operation, mem
4384 def PSm_Int : Ii8<opcps, MRMSrcMem,
4385 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4386 !strconcat(OpcodeStr,
4387 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4389 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
4391 Requires<[HasSSE41]>;
4393 // Vector intrinsic operation, reg
4394 def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
4395 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4396 !strconcat(OpcodeStr,
4397 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4398 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
4401 // Vector intrinsic operation, mem
4402 def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
4403 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4404 !strconcat(OpcodeStr,
4405 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4407 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
4411 multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
4412 RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
4413 // Intrinsic operation, reg.
4414 // Vector intrinsic operation, reg
4415 def PSr : SS4AIi8<opcps, MRMSrcReg,
4416 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4417 !strconcat(OpcodeStr,
4418 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4421 // Vector intrinsic operation, mem
4422 def PSm : Ii8<opcps, MRMSrcMem,
4423 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4424 !strconcat(OpcodeStr,
4425 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4426 []>, TA, OpSize, Requires<[HasSSE41]>;
4428 // Vector intrinsic operation, reg
4429 def PDr : SS4AIi8<opcpd, MRMSrcReg,
4430 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4431 !strconcat(OpcodeStr,
4432 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4435 // Vector intrinsic operation, mem
4436 def PDm : SS4AIi8<opcpd, MRMSrcMem,
4437 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4438 !strconcat(OpcodeStr,
4439 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4443 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4446 Intrinsic F64Int, bit Is2Addr = 1> {
4447 // Intrinsic operation, reg.
4448 def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
4449 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4451 !strconcat(OpcodeStr,
4452 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4453 !strconcat(OpcodeStr,
4454 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4455 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4458 // Intrinsic operation, mem.
4459 def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
4460 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4462 !strconcat(OpcodeStr,
4463 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4464 !strconcat(OpcodeStr,
4465 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4467 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
4470 // Intrinsic operation, reg.
4471 def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
4472 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4474 !strconcat(OpcodeStr,
4475 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4476 !strconcat(OpcodeStr,
4477 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4478 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4481 // Intrinsic operation, mem.
4482 def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
4483 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4485 !strconcat(OpcodeStr,
4486 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4487 !strconcat(OpcodeStr,
4488 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4490 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
4494 multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
4496 // Intrinsic operation, reg.
4497 def SSr : SS4AIi8<opcss, MRMSrcReg,
4498 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4499 !strconcat(OpcodeStr,
4500 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4503 // Intrinsic operation, mem.
4504 def SSm : SS4AIi8<opcss, MRMSrcMem,
4505 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4506 !strconcat(OpcodeStr,
4507 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4510 // Intrinsic operation, reg.
4511 def SDr : SS4AIi8<opcsd, MRMSrcReg,
4512 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4513 !strconcat(OpcodeStr,
4514 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4517 // Intrinsic operation, mem.
4518 def SDm : SS4AIi8<opcsd, MRMSrcMem,
4519 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4520 !strconcat(OpcodeStr,
4521 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4525 // FP round - roundss, roundps, roundsd, roundpd
4526 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4528 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
4529 memopv4f32, memopv2f64,
4530 int_x86_sse41_round_ps,
4531 int_x86_sse41_round_pd>, VEX;
4532 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
4533 memopv8f32, memopv4f64,
4534 int_x86_avx_round_ps_256,
4535 int_x86_avx_round_pd_256>, VEX;
4536 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
4537 int_x86_sse41_round_ss,
4538 int_x86_sse41_round_sd, 0>, VEX_4V;
4540 // Instructions for the assembler
4541 defm VROUND : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
4543 defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
4545 defm VROUND : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
4548 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
4549 memopv4f32, memopv2f64,
4550 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
4551 let Constraints = "$src1 = $dst" in
4552 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
4553 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
4555 //===----------------------------------------------------------------------===//
4556 // SSE4.1 - Packed Bit Test
4557 //===----------------------------------------------------------------------===//
4559 // ptest instruction we'll lower to this in X86ISelLowering primarily from
4560 // the intel intrinsic that corresponds to this.
4561 let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in {
4562 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4563 "vptest\t{$src2, $src1|$src1, $src2}",
4564 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4566 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4567 "vptest\t{$src2, $src1|$src1, $src2}",
4568 [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4571 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
4572 "vptest\t{$src2, $src1|$src1, $src2}",
4573 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
4575 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
4576 "vptest\t{$src2, $src1|$src1, $src2}",
4577 [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
4581 let Defs = [EFLAGS] in {
4582 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4583 "ptest \t{$src2, $src1|$src1, $src2}",
4584 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4586 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4587 "ptest \t{$src2, $src1|$src1, $src2}",
4588 [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4592 // The bit test instructions below are AVX only
4593 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
4594 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
4595 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
4596 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4597 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
4598 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
4599 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4600 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
4604 let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in {
4605 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
4606 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
4607 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
4608 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
4611 //===----------------------------------------------------------------------===//
4612 // SSE4.1 - Misc Instructions
4613 //===----------------------------------------------------------------------===//
4615 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
4616 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
4617 Intrinsic IntId128> {
4618 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4620 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4621 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
4622 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4624 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4627 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
4630 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4631 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
4632 int_x86_sse41_phminposuw>, VEX;
4633 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
4634 int_x86_sse41_phminposuw>;
4636 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
4637 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
4638 Intrinsic IntId128, bit Is2Addr = 1> {
4639 let isCommutable = 1 in
4640 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4641 (ins VR128:$src1, VR128:$src2),
4643 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4644 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4645 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
4646 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4647 (ins VR128:$src1, i128mem:$src2),
4649 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4650 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4652 (IntId128 VR128:$src1,
4653 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4656 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4657 let isCommutable = 0 in
4658 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
4660 defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
4662 defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
4664 defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
4666 defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
4668 defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
4670 defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
4672 defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
4674 defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
4676 defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
4678 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
4682 let Constraints = "$src1 = $dst" in {
4683 let isCommutable = 0 in
4684 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
4685 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
4686 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
4687 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
4688 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
4689 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
4690 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
4691 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
4692 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
4693 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
4694 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
4697 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
4698 (PCMPEQQrr VR128:$src1, VR128:$src2)>;
4699 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
4700 (PCMPEQQrm VR128:$src1, addr:$src2)>;
4702 /// SS48I_binop_rm - Simple SSE41 binary operator.
4703 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4704 ValueType OpVT, bit Is2Addr = 1> {
4705 let isCommutable = 1 in
4706 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4707 (ins VR128:$src1, VR128:$src2),
4709 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4710 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4711 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
4713 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4714 (ins VR128:$src1, i128mem:$src2),
4716 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4717 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4718 [(set VR128:$dst, (OpNode VR128:$src1,
4719 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
4723 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4724 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
4725 let Constraints = "$src1 = $dst" in
4726 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
4728 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
4729 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
4730 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
4731 X86MemOperand x86memop, bit Is2Addr = 1> {
4732 let isCommutable = 1 in
4733 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
4734 (ins RC:$src1, RC:$src2, i32i8imm:$src3),
4736 !strconcat(OpcodeStr,
4737 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4738 !strconcat(OpcodeStr,
4739 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4740 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
4742 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
4743 (ins RC:$src1, x86memop:$src2, i32i8imm:$src3),
4745 !strconcat(OpcodeStr,
4746 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4747 !strconcat(OpcodeStr,
4748 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4751 (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
4755 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4756 let isCommutable = 0 in {
4757 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
4758 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4759 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
4760 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4761 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
4762 int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4763 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
4764 int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4765 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
4766 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4767 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
4768 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4770 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
4771 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4772 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
4773 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4774 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
4775 VR256, memopv32i8, i256mem, 0>, VEX_4V;
4778 let Constraints = "$src1 = $dst" in {
4779 let isCommutable = 0 in {
4780 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
4781 VR128, memopv16i8, i128mem>;
4782 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
4783 VR128, memopv16i8, i128mem>;
4784 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
4785 VR128, memopv16i8, i128mem>;
4786 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
4787 VR128, memopv16i8, i128mem>;
4789 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
4790 VR128, memopv16i8, i128mem>;
4791 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
4792 VR128, memopv16i8, i128mem>;
4795 /// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operators
4796 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4797 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
4798 RegisterClass RC, X86MemOperand x86memop,
4799 PatFrag mem_frag, Intrinsic IntId> {
4800 def rr : I<opc, MRMSrcReg, (outs RC:$dst),
4801 (ins RC:$src1, RC:$src2, RC:$src3),
4802 !strconcat(OpcodeStr,
4803 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4804 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
4805 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4807 def rm : I<opc, MRMSrcMem, (outs RC:$dst),
4808 (ins RC:$src1, x86memop:$src2, RC:$src3),
4809 !strconcat(OpcodeStr,
4810 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4812 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
4814 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4818 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
4819 memopv16i8, int_x86_sse41_blendvpd>;
4820 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
4821 memopv16i8, int_x86_sse41_blendvps>;
4822 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
4823 memopv16i8, int_x86_sse41_pblendvb>;
4824 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
4825 memopv32i8, int_x86_avx_blendv_pd_256>;
4826 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
4827 memopv32i8, int_x86_avx_blendv_ps_256>;
4829 /// SS41I_ternary_int - SSE 4.1 ternary operator
4830 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
4831 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4832 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4833 (ins VR128:$src1, VR128:$src2),
4834 !strconcat(OpcodeStr,
4835 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4836 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
4839 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4840 (ins VR128:$src1, i128mem:$src2),
4841 !strconcat(OpcodeStr,
4842 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4845 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
4849 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
4850 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
4851 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
4853 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4854 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4855 "vmovntdqa\t{$src, $dst|$dst, $src}",
4856 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4858 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4859 "movntdqa\t{$src, $dst|$dst, $src}",
4860 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4863 //===----------------------------------------------------------------------===//
4864 // SSE4.2 - Compare Instructions
4865 //===----------------------------------------------------------------------===//
4867 /// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
4868 multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
4869 Intrinsic IntId128, bit Is2Addr = 1> {
4870 def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
4871 (ins VR128:$src1, VR128:$src2),
4873 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4874 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4875 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
4877 def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
4878 (ins VR128:$src1, i128mem:$src2),
4880 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4881 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4883 (IntId128 VR128:$src1,
4884 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4887 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4888 defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
4890 let Constraints = "$src1 = $dst" in
4891 defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
4893 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
4894 (PCMPGTQrr VR128:$src1, VR128:$src2)>;
4895 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
4896 (PCMPGTQrm VR128:$src1, addr:$src2)>;
4898 //===----------------------------------------------------------------------===//
4899 // SSE4.2 - String/text Processing Instructions
4900 //===----------------------------------------------------------------------===//
4902 // Packed Compare Implicit Length Strings, Return Mask
4903 multiclass pseudo_pcmpistrm<string asm> {
4904 def REG : Ii8<0, Pseudo, (outs VR128:$dst),
4905 (ins VR128:$src1, VR128:$src2, i8imm:$src3), !strconcat(asm, "rr PSEUDO"),
4906 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
4908 def MEM : Ii8<0, Pseudo, (outs VR128:$dst),
4909 (ins VR128:$src1, i128mem:$src2, i8imm:$src3), !strconcat(asm, "rm PSEUDO"),
4910 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
4911 VR128:$src1, (load addr:$src2), imm:$src3))]>;
4914 let Defs = [EFLAGS], usesCustomInserter = 1 in {
4915 defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
4916 defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
4919 let Defs = [XMM0, EFLAGS], isAsmParserOnly = 1,
4920 Predicates = [HasAVX] in {
4921 def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4922 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4923 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4924 def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4925 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4926 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4929 let Defs = [XMM0, EFLAGS] in {
4930 def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4931 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4932 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4933 def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4934 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4935 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4938 // Packed Compare Explicit Length Strings, Return Mask
4939 multiclass pseudo_pcmpestrm<string asm> {
4940 def REG : Ii8<0, Pseudo, (outs VR128:$dst),
4941 (ins VR128:$src1, VR128:$src3, i8imm:$src5), !strconcat(asm, "rr PSEUDO"),
4942 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4943 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
4944 def MEM : Ii8<0, Pseudo, (outs VR128:$dst),
4945 (ins VR128:$src1, i128mem:$src3, i8imm:$src5), !strconcat(asm, "rm PSEUDO"),
4946 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4947 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
4950 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
4951 defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
4952 defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
4955 let isAsmParserOnly = 1, Predicates = [HasAVX],
4956 Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4957 def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4958 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4959 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4960 def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4961 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4962 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4965 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4966 def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4967 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4968 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4969 def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4970 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4971 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4974 // Packed Compare Implicit Length Strings, Return Index
4975 let Defs = [ECX, EFLAGS] in {
4976 multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
4977 def rr : SS42AI<0x63, MRMSrcReg, (outs),
4978 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4979 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4980 [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
4981 (implicit EFLAGS)]>, OpSize;
4982 def rm : SS42AI<0x63, MRMSrcMem, (outs),
4983 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4984 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4985 [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
4986 (implicit EFLAGS)]>, OpSize;
4990 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4991 defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
4993 defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
4995 defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
4997 defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
4999 defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
5001 defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
5005 defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
5006 defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
5007 defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
5008 defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
5009 defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
5010 defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
5012 // Packed Compare Explicit Length Strings, Return Index
5013 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
5014 multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
5015 def rr : SS42AI<0x61, MRMSrcReg, (outs),
5016 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5017 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5018 [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
5019 (implicit EFLAGS)]>, OpSize;
5020 def rm : SS42AI<0x61, MRMSrcMem, (outs),
5021 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5022 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5024 (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
5025 (implicit EFLAGS)]>, OpSize;
5029 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
5030 defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
5032 defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
5034 defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
5036 defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
5038 defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
5040 defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
5044 defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
5045 defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
5046 defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
5047 defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
5048 defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
5049 defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
5051 //===----------------------------------------------------------------------===//
5052 // SSE4.2 - CRC Instructions
5053 //===----------------------------------------------------------------------===//
5055 // No CRC instructions have AVX equivalents
5057 // crc intrinsic instruction
5058 // This set of instructions are only rm, the only difference is the size
5060 let Constraints = "$src1 = $dst" in {
5061 def CRC32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
5062 (ins GR32:$src1, i8mem:$src2),
5063 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5065 (int_x86_sse42_crc32_8 GR32:$src1,
5066 (load addr:$src2)))]>;
5067 def CRC32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
5068 (ins GR32:$src1, GR8:$src2),
5069 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5071 (int_x86_sse42_crc32_8 GR32:$src1, GR8:$src2))]>;
5072 def CRC32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5073 (ins GR32:$src1, i16mem:$src2),
5074 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5076 (int_x86_sse42_crc32_16 GR32:$src1,
5077 (load addr:$src2)))]>,
5079 def CRC32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5080 (ins GR32:$src1, GR16:$src2),
5081 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5083 (int_x86_sse42_crc32_16 GR32:$src1, GR16:$src2))]>,
5085 def CRC32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5086 (ins GR32:$src1, i32mem:$src2),
5087 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5089 (int_x86_sse42_crc32_32 GR32:$src1,
5090 (load addr:$src2)))]>;
5091 def CRC32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5092 (ins GR32:$src1, GR32:$src2),
5093 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5095 (int_x86_sse42_crc32_32 GR32:$src1, GR32:$src2))]>;
5096 def CRC64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
5097 (ins GR64:$src1, i8mem:$src2),
5098 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5100 (int_x86_sse42_crc64_8 GR64:$src1,
5101 (load addr:$src2)))]>,
5103 def CRC64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
5104 (ins GR64:$src1, GR8:$src2),
5105 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5107 (int_x86_sse42_crc64_8 GR64:$src1, GR8:$src2))]>,
5109 def CRC64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
5110 (ins GR64:$src1, i64mem:$src2),
5111 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5113 (int_x86_sse42_crc64_64 GR64:$src1,
5114 (load addr:$src2)))]>,
5116 def CRC64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
5117 (ins GR64:$src1, GR64:$src2),
5118 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5120 (int_x86_sse42_crc64_64 GR64:$src1, GR64:$src2))]>,
5124 //===----------------------------------------------------------------------===//
5125 // AES-NI Instructions
5126 //===----------------------------------------------------------------------===//
5128 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
5129 Intrinsic IntId128, bit Is2Addr = 1> {
5130 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
5131 (ins VR128:$src1, VR128:$src2),
5133 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5134 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5135 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5137 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
5138 (ins VR128:$src1, i128mem:$src2),
5140 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5141 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5143 (IntId128 VR128:$src1,
5144 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5147 // Perform One Round of an AES Encryption/Decryption Flow
5148 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5149 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
5150 int_x86_aesni_aesenc, 0>, VEX_4V;
5151 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
5152 int_x86_aesni_aesenclast, 0>, VEX_4V;
5153 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
5154 int_x86_aesni_aesdec, 0>, VEX_4V;
5155 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
5156 int_x86_aesni_aesdeclast, 0>, VEX_4V;
5159 let Constraints = "$src1 = $dst" in {
5160 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
5161 int_x86_aesni_aesenc>;
5162 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
5163 int_x86_aesni_aesenclast>;
5164 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
5165 int_x86_aesni_aesdec>;
5166 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
5167 int_x86_aesni_aesdeclast>;
5170 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
5171 (AESENCrr VR128:$src1, VR128:$src2)>;
5172 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
5173 (AESENCrm VR128:$src1, addr:$src2)>;
5174 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
5175 (AESENCLASTrr VR128:$src1, VR128:$src2)>;
5176 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
5177 (AESENCLASTrm VR128:$src1, addr:$src2)>;
5178 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
5179 (AESDECrr VR128:$src1, VR128:$src2)>;
5180 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
5181 (AESDECrm VR128:$src1, addr:$src2)>;
5182 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
5183 (AESDECLASTrr VR128:$src1, VR128:$src2)>;
5184 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
5185 (AESDECLASTrm VR128:$src1, addr:$src2)>;
5187 // Perform the AES InvMixColumn Transformation
5188 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5189 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5191 "vaesimc\t{$src1, $dst|$dst, $src1}",
5193 (int_x86_aesni_aesimc VR128:$src1))]>,
5195 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5196 (ins i128mem:$src1),
5197 "vaesimc\t{$src1, $dst|$dst, $src1}",
5199 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5202 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5204 "aesimc\t{$src1, $dst|$dst, $src1}",
5206 (int_x86_aesni_aesimc VR128:$src1))]>,
5208 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5209 (ins i128mem:$src1),
5210 "aesimc\t{$src1, $dst|$dst, $src1}",
5212 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5215 // AES Round Key Generation Assist
5216 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5217 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5218 (ins VR128:$src1, i8imm:$src2),
5219 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5221 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5223 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5224 (ins i128mem:$src1, i8imm:$src2),
5225 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5227 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5231 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5232 (ins VR128:$src1, i8imm:$src2),
5233 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5235 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5237 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5238 (ins i128mem:$src1, i8imm:$src2),
5239 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5241 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5245 //===----------------------------------------------------------------------===//
5246 // CLMUL Instructions
5247 //===----------------------------------------------------------------------===//
5249 // Only the AVX version of CLMUL instructions are described here.
5251 // Carry-less Multiplication instructions
5252 let isAsmParserOnly = 1 in {
5253 def VPCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
5254 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5255 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5258 def VPCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
5259 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5260 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5264 multiclass avx_vpclmul<string asm> {
5265 def rr : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
5266 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5269 def rm : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
5270 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5273 defm VPCLMULHQHQDQ : avx_vpclmul<"vpclmulhqhqdq">;
5274 defm VPCLMULHQLQDQ : avx_vpclmul<"vpclmulhqlqdq">;
5275 defm VPCLMULLQHQDQ : avx_vpclmul<"vpclmullqhqdq">;
5276 defm VPCLMULLQLQDQ : avx_vpclmul<"vpclmullqlqdq">;
5278 } // isAsmParserOnly
5280 //===----------------------------------------------------------------------===//
5282 //===----------------------------------------------------------------------===//
5284 let isAsmParserOnly = 1 in {
5286 // Load from memory and broadcast to all elements of the destination operand
5287 class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
5288 X86MemOperand x86memop, Intrinsic Int> :
5289 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
5290 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5291 [(set RC:$dst, (Int addr:$src))]>, VEX;
5293 def VBROADCASTSS : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
5294 int_x86_avx_vbroadcastss>;
5295 def VBROADCASTSSY : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
5296 int_x86_avx_vbroadcastss_256>;
5297 def VBROADCASTSD : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
5298 int_x86_avx_vbroadcast_sd_256>;
5299 def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
5300 int_x86_avx_vbroadcastf128_pd_256>;
5302 // Insert packed floating-point values
5303 def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
5304 (ins VR256:$src1, VR128:$src2, i8imm:$src3),
5305 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5307 def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
5308 (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
5309 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5312 // Extract packed floating-point values
5313 def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
5314 (ins VR256:$src1, i8imm:$src2),
5315 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5317 def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
5318 (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
5319 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5322 // Conditional SIMD Packed Loads and Stores
5323 multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
5324 Intrinsic IntLd, Intrinsic IntLd256,
5325 Intrinsic IntSt, Intrinsic IntSt256,
5326 PatFrag pf128, PatFrag pf256> {
5327 def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
5328 (ins VR128:$src1, f128mem:$src2),
5329 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5330 [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
5332 def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
5333 (ins VR256:$src1, f256mem:$src2),
5334 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5335 [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
5337 def mr : AVX8I<opc_mr, MRMDestMem, (outs),
5338 (ins f128mem:$dst, VR128:$src1, VR128:$src2),
5339 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5340 [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
5341 def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
5342 (ins f256mem:$dst, VR256:$src1, VR256:$src2),
5343 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5344 [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
5347 defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
5348 int_x86_avx_maskload_ps,
5349 int_x86_avx_maskload_ps_256,
5350 int_x86_avx_maskstore_ps,
5351 int_x86_avx_maskstore_ps_256,
5352 memopv4f32, memopv8f32>;
5353 defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
5354 int_x86_avx_maskload_pd,
5355 int_x86_avx_maskload_pd_256,
5356 int_x86_avx_maskstore_pd,
5357 int_x86_avx_maskstore_pd_256,
5358 memopv2f64, memopv4f64>;
5360 // Permute Floating-Point Values
5361 multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
5362 RegisterClass RC, X86MemOperand x86memop_f,
5363 X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
5364 Intrinsic IntVar, Intrinsic IntImm> {
5365 def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
5366 (ins RC:$src1, RC:$src2),
5367 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5368 [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
5369 def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
5370 (ins RC:$src1, x86memop_i:$src2),
5371 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5372 [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;
5374 def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
5375 (ins RC:$src1, i8imm:$src2),
5376 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5377 [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
5378 def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
5379 (ins x86memop_f:$src1, i8imm:$src2),
5380 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5381 [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
5384 defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
5385 memopv4f32, memopv4i32,
5386 int_x86_avx_vpermilvar_ps,
5387 int_x86_avx_vpermil_ps>;
5388 defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
5389 memopv8f32, memopv8i32,
5390 int_x86_avx_vpermilvar_ps_256,
5391 int_x86_avx_vpermil_ps_256>;
5392 defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
5393 memopv2f64, memopv2i64,
5394 int_x86_avx_vpermilvar_pd,
5395 int_x86_avx_vpermil_pd>;
5396 defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
5397 memopv4f64, memopv4i64,
5398 int_x86_avx_vpermilvar_pd_256,
5399 int_x86_avx_vpermil_pd_256>;
5401 def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
5402 (ins VR256:$src1, VR256:$src2, i8imm:$src3),
5403 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5405 def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
5406 (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
5407 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5410 // Zero All YMM registers
5411 def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
5412 [(int_x86_avx_vzeroall)]>, VEX, VEX_L, Requires<[HasAVX]>;
5414 // Zero Upper bits of YMM registers
5415 def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
5416 [(int_x86_avx_vzeroupper)]>, VEX, Requires<[HasAVX]>;
5418 } // isAsmParserOnly
5420 def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
5421 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5422 def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
5423 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5424 def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
5425 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5427 def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
5428 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5429 def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
5430 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5431 def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
5432 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5434 def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
5435 (VBROADCASTF128 addr:$src)>;
5437 def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
5438 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5439 def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
5440 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5441 def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
5442 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5444 def : Pat<(int_x86_avx_vperm2f128_ps_256
5445 VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
5446 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5447 def : Pat<(int_x86_avx_vperm2f128_pd_256
5448 VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
5449 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5450 def : Pat<(int_x86_avx_vperm2f128_si_256
5451 VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
5452 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5454 //===----------------------------------------------------------------------===//
5455 // SSE Shuffle pattern fragments
5456 //===----------------------------------------------------------------------===//
5458 // This is part of a "work in progress" refactoring. The idea is that all
5459 // vector shuffles are going to be translated into target specific nodes and
5460 // directly matched by the patterns below (which can be changed along the way)
5461 // The AVX version of some but not all of them are described here, and more
5462 // should come in a near future.
5464 // Shuffle with PSHUFD instruction folding loads. The first two patterns match
5465 // SSE2 loads, which are always promoted to v2i64. The last one should match
5466 // the SSE1 case, where the only legal load is v4f32, but there is no PSHUFD
5467 // in SSE2, how does it ever worked? Anyway, the pattern will remain here until
5468 // we investigate further.
5469 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
5471 (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>;
5472 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
5474 (PSHUFDmi addr:$src1, imm:$imm)>;
5475 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
5477 (PSHUFDmi addr:$src1, imm:$imm)>; // FIXME: has this ever worked?
5479 // Shuffle with PSHUFD instruction.
5480 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5481 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
5482 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5483 (PSHUFDri VR128:$src1, imm:$imm)>;
5485 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5486 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
5487 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5488 (PSHUFDri VR128:$src1, imm:$imm)>;
5490 // Shuffle with SHUFPD instruction.
5491 def : Pat<(v2f64 (X86Shufps VR128:$src1,
5492 (memopv2f64 addr:$src2), (i8 imm:$imm))),
5493 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5494 def : Pat<(v2f64 (X86Shufps VR128:$src1,
5495 (memopv2f64 addr:$src2), (i8 imm:$imm))),
5496 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
5498 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5499 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5500 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5501 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
5503 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5504 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5505 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5506 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
5508 // Shuffle with SHUFPS instruction.
5509 def : Pat<(v4f32 (X86Shufps VR128:$src1,
5510 (memopv4f32 addr:$src2), (i8 imm:$imm))),
5511 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5512 def : Pat<(v4f32 (X86Shufps VR128:$src1,
5513 (memopv4f32 addr:$src2), (i8 imm:$imm))),
5514 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
5516 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5517 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5518 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5519 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
5521 def : Pat<(v4i32 (X86Shufps VR128:$src1,
5522 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
5523 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5524 def : Pat<(v4i32 (X86Shufps VR128:$src1,
5525 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
5526 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
5528 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5529 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5530 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5531 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
5533 // Shuffle with MOVHLPS instruction
5534 def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
5535 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
5536 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
5537 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
5539 // Shuffle with MOVDDUP instruction
5540 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5541 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5542 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5543 (MOVDDUPrm addr:$src)>;
5545 def : Pat<(X86Movddup (bc_v4f32 (memopv2f64 addr:$src))),
5546 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5547 def : Pat<(X86Movddup (bc_v4f32 (memopv2f64 addr:$src))),
5548 (MOVDDUPrm addr:$src)>;
5550 def : Pat<(X86Movddup (memopv2i64 addr:$src)),
5551 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5552 def : Pat<(X86Movddup (memopv2i64 addr:$src)),
5553 (MOVDDUPrm addr:$src)>;
5555 def : Pat<(X86Movddup (bc_v4i32 (memopv2i64 addr:$src))),
5556 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5557 def : Pat<(X86Movddup (bc_v4i32 (memopv2i64 addr:$src))),
5558 (MOVDDUPrm addr:$src)>;
5560 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
5561 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5562 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
5563 (MOVDDUPrm addr:$src)>;
5565 def : Pat<(X86Movddup (bc_v2f64
5566 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5567 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5568 def : Pat<(X86Movddup (bc_v2f64
5569 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5570 (MOVDDUPrm addr:$src)>;
5572 // Shuffle with UNPCKLPS
5573 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
5574 (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5575 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
5576 (UNPCKLPSrm VR128:$src1, addr:$src2)>;
5578 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
5579 (VUNPCKLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5580 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
5581 (UNPCKLPSrr VR128:$src1, VR128:$src2)>;
5583 // Shuffle with UNPCKHPS
5584 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
5585 (VUNPCKHPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5586 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
5587 (UNPCKHPSrm VR128:$src1, addr:$src2)>;
5589 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
5590 (VUNPCKHPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5591 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
5592 (UNPCKHPSrr VR128:$src1, VR128:$src2)>;
5594 // Shuffle with UNPCKLPD
5595 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
5596 (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5597 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
5598 (UNPCKLPSrm VR128:$src1, addr:$src2)>;
5600 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
5601 (VUNPCKLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5602 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
5603 (UNPCKLPDrr VR128:$src1, VR128:$src2)>;
5605 // Shuffle with UNPCKHPD
5606 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
5607 (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5608 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
5609 (UNPCKLPSrm VR128:$src1, addr:$src2)>;
5611 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
5612 (VUNPCKHPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5613 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
5614 (UNPCKHPDrr VR128:$src1, VR128:$src2)>;
5616 // Shuffle with PUNPCKLBW
5617 def : Pat<(v16i8 (X86Punpcklbw VR128:$src1,
5618 (bc_v16i8 (memopv2i64 addr:$src2)))),
5619 (PUNPCKLBWrm VR128:$src1, addr:$src2)>;
5620 def : Pat<(v16i8 (X86Punpcklbw VR128:$src1, VR128:$src2)),
5621 (PUNPCKLBWrr VR128:$src1, VR128:$src2)>;
5623 // Shuffle with PUNPCKLWD
5624 def : Pat<(v8i16 (X86Punpcklwd VR128:$src1,
5625 (bc_v8i16 (memopv2i64 addr:$src2)))),
5626 (PUNPCKLWDrm VR128:$src1, addr:$src2)>;
5627 def : Pat<(v8i16 (X86Punpcklwd VR128:$src1, VR128:$src2)),
5628 (PUNPCKLWDrr VR128:$src1, VR128:$src2)>;
5630 // Shuffle with PUNPCKLDQ
5631 def : Pat<(v4i32 (X86Punpckldq VR128:$src1,
5632 (bc_v4i32 (memopv2i64 addr:$src2)))),
5633 (PUNPCKLDQrm VR128:$src1, addr:$src2)>;
5634 def : Pat<(v4i32 (X86Punpckldq VR128:$src1, VR128:$src2)),
5635 (PUNPCKLDQrr VR128:$src1, VR128:$src2)>;
5637 // Shuffle with PUNPCKLQDQ
5638 def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, (memopv2i64 addr:$src2))),
5639 (PUNPCKLQDQrm VR128:$src1, addr:$src2)>;
5640 def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)),
5641 (PUNPCKLQDQrr VR128:$src1, VR128:$src2)>;
5643 // Shuffle with PUNPCKHBW
5644 def : Pat<(v16i8 (X86Punpckhbw VR128:$src1,
5645 (bc_v16i8 (memopv2i64 addr:$src2)))),
5646 (PUNPCKHBWrm VR128:$src1, addr:$src2)>;
5647 def : Pat<(v16i8 (X86Punpckhbw VR128:$src1, VR128:$src2)),
5648 (PUNPCKHBWrr VR128:$src1, VR128:$src2)>;
5650 // Shuffle with PUNPCKHWD
5651 def : Pat<(v8i16 (X86Punpckhwd VR128:$src1,
5652 (bc_v8i16 (memopv2i64 addr:$src2)))),
5653 (PUNPCKHWDrm VR128:$src1, addr:$src2)>;
5654 def : Pat<(v8i16 (X86Punpckhwd VR128:$src1, VR128:$src2)),
5655 (PUNPCKHWDrr VR128:$src1, VR128:$src2)>;
5657 // Shuffle with PUNPCKHDQ
5658 def : Pat<(v4i32 (X86Punpckhdq VR128:$src1,
5659 (bc_v4i32 (memopv2i64 addr:$src2)))),
5660 (PUNPCKHDQrm VR128:$src1, addr:$src2)>;
5661 def : Pat<(v4i32 (X86Punpckhdq VR128:$src1, VR128:$src2)),
5662 (PUNPCKHDQrr VR128:$src1, VR128:$src2)>;
5664 // Shuffle with PUNPCKHQDQ
5665 def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, (memopv2i64 addr:$src2))),
5666 (PUNPCKHQDQrm VR128:$src1, addr:$src2)>;
5667 def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)),
5668 (PUNPCKHQDQrr VR128:$src1, VR128:$src2)>;
5670 // Shuffle with MOVLHPS
5671 def : Pat<(X86Movlhps VR128:$src1,
5672 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
5673 (MOVHPSrm VR128:$src1, addr:$src2)>;
5674 def : Pat<(X86Movlhps VR128:$src1,
5675 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
5676 (MOVHPSrm VR128:$src1, addr:$src2)>;
5677 def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
5678 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
5679 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
5680 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
5681 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
5682 (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
5683 // FIXME: Instead of X86Movddup, there should be a X86Movlhps here, the problem
5684 // is during lowering, where it's not possible to recognize the load fold cause
5685 // it has two uses through a bitcast. One use disappears at isel time and the
5686 // fold opportunity reappears.
5687 def : Pat<(v2i64 (X86Movddup VR128:$src)),
5688 (MOVLHPSrr VR128:$src, VR128:$src)>;
5689 def : Pat<(v4f32 (X86Movddup VR128:$src)),
5690 (MOVLHPSrr VR128:$src, VR128:$src)>;
5691 def : Pat<(v2f64 (X86Movddup VR128:$src)),
5692 (UNPCKLPDrr VR128:$src, VR128:$src)>;
5694 // Shuffle with MOVLHPD
5695 def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
5696 (scalar_to_vector (loadf64 addr:$src2)))),
5697 (MOVHPDrm VR128:$src1, addr:$src2)>;
5698 // FIXME: Instead of X86Unpcklpd, there should be a X86Movlhpd here, the problem
5699 // is during lowering, where it's not possible to recognize the load fold cause
5700 // it has two uses through a bitcast. One use disappears at isel time and the
5701 // fold opportunity reappears.
5702 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
5703 (scalar_to_vector (loadf64 addr:$src2)))),
5704 (MOVHPDrm VR128:$src1, addr:$src2)>;
5706 // Shuffle with MOVSS
5707 def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
5708 (MOVSSrr VR128:$src1, FR32:$src2)>;
5709 def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
5710 (MOVSSrr (v4i32 VR128:$src1),
5711 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
5712 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
5713 (MOVSSrr (v4f32 VR128:$src1),
5714 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
5715 // FIXME: Instead of a X86Movss there should be a X86Movlps here, the problem
5716 // is during lowering, where it's not possible to recognize the load fold cause
5717 // it has two uses through a bitcast. One use disappears at isel time and the
5718 // fold opportunity reappears.
5719 def : Pat<(X86Movss VR128:$src1,
5720 (bc_v4i32 (v2i64 (load addr:$src2)))),
5721 (MOVLPSrm VR128:$src1, addr:$src2)>;
5723 // Shuffle with MOVSD
5724 def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
5725 (MOVSDrr VR128:$src1, FR64:$src2)>;
5726 def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
5727 (MOVSDrr (v2i64 VR128:$src1),
5728 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
5729 def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
5730 (MOVSDrr (v2f64 VR128:$src1),
5731 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
5732 def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
5733 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
5734 def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
5735 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
5737 // Shuffle with MOVSHDUP
5738 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
5739 (MOVSHDUPrr VR128:$src)>;
5740 def : Pat<(X86Movshdup (bc_v4i32 (memopv2i64 addr:$src))),
5741 (MOVSHDUPrm addr:$src)>;
5743 def : Pat<(v4f32 (X86Movshdup VR128:$src)),
5744 (MOVSHDUPrr VR128:$src)>;
5745 def : Pat<(X86Movshdup (memopv4f32 addr:$src)),
5746 (MOVSHDUPrm addr:$src)>;
5748 // Shuffle with MOVSLDUP
5749 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
5750 (MOVSLDUPrr VR128:$src)>;
5751 def : Pat<(X86Movsldup (bc_v4i32 (memopv2i64 addr:$src))),
5752 (MOVSLDUPrm addr:$src)>;
5754 def : Pat<(v4f32 (X86Movsldup VR128:$src)),
5755 (MOVSLDUPrr VR128:$src)>;
5756 def : Pat<(X86Movsldup (memopv4f32 addr:$src)),
5757 (MOVSLDUPrm addr:$src)>;
5759 // Shuffle with PSHUFHW
5760 def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
5761 (PSHUFHWri VR128:$src, imm:$imm)>;
5762 def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
5763 (PSHUFHWmi addr:$src, imm:$imm)>;
5765 // Shuffle with PSHUFLW
5766 def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
5767 (PSHUFLWri VR128:$src, imm:$imm)>;
5768 def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
5769 (PSHUFLWmi addr:$src, imm:$imm)>;
5771 // Shuffle with PALIGN
5772 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5773 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5774 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5775 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5776 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5777 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5778 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5779 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5781 // Shuffle with MOVLPS
5782 def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
5783 (MOVLPSrm VR128:$src1, addr:$src2)>;
5784 def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
5785 (MOVLPSrm VR128:$src1, addr:$src2)>;
5786 def : Pat<(X86Movlps VR128:$src1,
5787 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
5788 (MOVLPSrm VR128:$src1, addr:$src2)>;
5790 // Shuffle with MOVLPD
5791 def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
5792 (MOVLPDrm VR128:$src1, addr:$src2)>;
5793 def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
5794 (MOVLPDrm VR128:$src1, addr:$src2)>;
5795 def : Pat<(v2f64 (X86Movlpd VR128:$src1,
5796 (scalar_to_vector (loadf64 addr:$src2)))),
5797 (MOVLPDrm VR128:$src1, addr:$src2)>;
5799 // Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD
5800 def : Pat<(store (f64 (vector_extract
5801 (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))),addr:$dst),
5802 (MOVHPSmr addr:$dst, VR128:$src)>;
5803 def : Pat<(store (f64 (vector_extract
5804 (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))),addr:$dst),
5805 (MOVHPDmr addr:$dst, VR128:$src)>;
5807 def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),addr:$src1),
5808 (MOVLPSmr addr:$src1, VR128:$src2)>;
5809 def : Pat<(store (v4i32 (X86Movlps
5810 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
5811 (MOVLPSmr addr:$src1, VR128:$src2)>;
5813 def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
5814 (MOVLPDmr addr:$src1, VR128:$src2)>;
5815 def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
5816 (MOVLPDmr addr:$src1, VR128:$src2)>;