1 //====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
17 //===----------------------------------------------------------------------===//
18 // SSE scalar FP Instructions
19 //===----------------------------------------------------------------------===//
21 // CMOV* - Used to implement the SSE SELECT DAG operation. Expanded after
22 // instruction selection into a branch sequence.
23 let Uses = [EFLAGS], usesCustomInserter = 1 in {
24 def CMOV_FR32 : I<0, Pseudo,
25 (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
27 [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
29 def CMOV_FR64 : I<0, Pseudo,
30 (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
32 [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
34 def CMOV_V4F32 : I<0, Pseudo,
35 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
36 "#CMOV_V4F32 PSEUDO!",
38 (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
40 def CMOV_V2F64 : I<0, Pseudo,
41 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
42 "#CMOV_V2F64 PSEUDO!",
44 (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
46 def CMOV_V2I64 : I<0, Pseudo,
47 (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
48 "#CMOV_V2I64 PSEUDO!",
50 (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
54 //===----------------------------------------------------------------------===//
55 // SSE 1 & 2 Instructions Classes
56 //===----------------------------------------------------------------------===//
58 /// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
59 multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
60 RegisterClass RC, X86MemOperand x86memop,
62 let isCommutable = 1 in {
63 def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
65 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
66 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
67 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
69 def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
71 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
72 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
73 [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
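// Illustrative use of the class above (a sketch only; the real defm lines that
// instantiate it appear further down in this file and may pass different
// parameters):
//   defm SS : sse12_fp_scalar<0x58, "addss", fadd, FR32, f32mem>, XS;
//   defm SD : sse12_fp_scalar<0x58, "addsd", fadd, FR64, f64mem>, XD;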
76 /// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
77 multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
78 string asm, string SSEVer, string FPSizeStr,
79 Operand memopr, ComplexPattern mem_cpat,
81 def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
83 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
84 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
85 [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
86 !strconcat(SSEVer, !strconcat("_",
87 !strconcat(OpcodeStr, FPSizeStr))))
88 RC:$src1, RC:$src2))]>;
89 def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
91 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
92 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
93 [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
94 !strconcat(SSEVer, !strconcat("_",
95 !strconcat(OpcodeStr, FPSizeStr))))
96 RC:$src1, mem_cpat:$src2))]>;
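// Note on the two patterns above: the !nameconcat/!strconcat chain builds an
// intrinsic name of the form "int_x86_sse" + SSEVer + "_" + OpcodeStr +
// FPSizeStr. As an illustrative (assumed) set of parameter values,
// SSEVer = "2", OpcodeStr = "add" and FPSizeStr = "_sd" would select the
// int_x86_sse2_add_sd intrinsic.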
99 /// sse12_fp_packed - SSE 1 & 2 packed instructions class
100 multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
101 RegisterClass RC, ValueType vt,
102 X86MemOperand x86memop, PatFrag mem_frag,
103 Domain d, bit Is2Addr = 1> {
104 let isCommutable = 1 in
105 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
107 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
108 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
109 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
111 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
113 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
114 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
115 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
118 /// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
119 multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
120 string OpcodeStr, X86MemOperand x86memop,
121 list<dag> pat_rr, list<dag> pat_rm,
123 let isCommutable = 1 in
124 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
126 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
127 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
129 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
131 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
132 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
136 /// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
137 multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
138 string asm, string SSEVer, string FPSizeStr,
139 X86MemOperand x86memop, PatFrag mem_frag,
140 Domain d, bit Is2Addr = 1> {
141 def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
143 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
144 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
145 [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_",
146 !strconcat(SSEVer, !strconcat("_",
147 !strconcat(OpcodeStr, FPSizeStr))))
148 RC:$src1, RC:$src2))], d>;
149 def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
151 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
152 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
153 [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_",
154 !strconcat(SSEVer, !strconcat("_",
155 !strconcat(OpcodeStr, FPSizeStr))))
156 RC:$src1, (mem_frag addr:$src2)))], d>;
159 //===----------------------------------------------------------------------===//
160 // SSE 1 & 2 - Move Instructions
161 //===----------------------------------------------------------------------===//
163 class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
164 SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
165 [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;
167 // Loading from memory automatically zeroes the upper bits.
168 class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
169 PatFrag mem_pat, string OpcodeStr> :
170 SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
171 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
172 [(set RC:$dst, (mem_pat addr:$src))]>;
174 // Move Instructions. Register-to-register movss/movsd is not used for FR32/64
175 // register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
176 // is used instead. Register-to-register movss/movsd is not modeled as an
177 // INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
178 // in terms of a copy, and as just mentioned, we don't use movss/movsd for copies.
179 let isAsmParserOnly = 1 in {
180 def VMOVSSrr : sse12_move_rr<FR32, v4f32,
181 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
182 def VMOVSDrr : sse12_move_rr<FR64, v2f64,
183 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;
185 let canFoldAsLoad = 1, isReMaterializable = 1 in {
186 def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;
188 let AddedComplexity = 20 in
189 def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
193 let Constraints = "$src1 = $dst" in {
194 def MOVSSrr : sse12_move_rr<FR32, v4f32,
195 "movss\t{$src2, $dst|$dst, $src2}">, XS;
196 def MOVSDrr : sse12_move_rr<FR64, v2f64,
197 "movsd\t{$src2, $dst|$dst, $src2}">, XD;
200 let canFoldAsLoad = 1, isReMaterializable = 1 in {
201 def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;
203 let AddedComplexity = 20 in
204 def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
207 let AddedComplexity = 15 in {
208 // Extract the low 32-bit value from one vector and insert it into another.
209 def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
210 (MOVSSrr (v4f32 VR128:$src1),
211 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
212 // Extract the low 64-bit value from one vector and insert it into another.
213 def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
214 (MOVSDrr (v2f64 VR128:$src1),
215 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
218 // Implicitly promote a 32-bit scalar to a vector.
219 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
220 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
221 // Implicitly promote a 64-bit scalar to a vector.
222 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
223 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
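// A descriptive note on the two promotions above: FR32/FR64 values already
// live in the low lane of an XMM register, so the INSERT_SUBREG into an
// IMPLICIT_DEF is expected to be coalesced away rather than emitted as a copy.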
225 let AddedComplexity = 20 in {
226 // MOVSSrm zeros the high parts of the register; represent this
227 // with SUBREG_TO_REG.
228 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
229 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
230 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
231 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
232 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
233 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
234 // MOVSDrm zeros the high parts of the register; represent this
235 // with SUBREG_TO_REG.
236 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
237 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
238 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
239 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
240 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
241 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
242 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
243 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
244 def : Pat<(v2f64 (X86vzload addr:$src)),
245 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
248 // Store scalar value to memory.
249 def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
250 "movss\t{$src, $dst|$dst, $src}",
251 [(store FR32:$src, addr:$dst)]>;
252 def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
253 "movsd\t{$src, $dst|$dst, $src}",
254 [(store FR64:$src, addr:$dst)]>;
256 let isAsmParserOnly = 1 in {
257 def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
258 "movss\t{$src, $dst|$dst, $src}",
259 [(store FR32:$src, addr:$dst)]>, XS, VEX;
260 def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
261 "movsd\t{$src, $dst|$dst, $src}",
262 [(store FR64:$src, addr:$dst)]>, XD, VEX;
265 // Extract and store.
266 def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
269 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
270 def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
273 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
275 // Move Aligned/Unaligned floating point values
276 multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
277 X86MemOperand x86memop, PatFrag ld_frag,
278 string asm, Domain d,
279 bit IsReMaterializable = 1> {
280 let neverHasSideEffects = 1 in
281 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
282 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
283 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
284 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
285 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
286 [(set RC:$dst, (ld_frag addr:$src))], d>;
289 let isAsmParserOnly = 1 in {
290 defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
291 "movaps", SSEPackedSingle>, VEX;
292 defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
293 "movapd", SSEPackedDouble>, OpSize, VEX;
294 defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
295 "movups", SSEPackedSingle>, VEX;
296 defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
297 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
299 defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
300 "movaps", SSEPackedSingle>, VEX;
301 defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
302 "movapd", SSEPackedDouble>, OpSize, VEX;
303 defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
304 "movups", SSEPackedSingle>, VEX;
305 defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
306 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
308 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
309 "movaps", SSEPackedSingle>, TB;
310 defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
311 "movapd", SSEPackedDouble>, TB, OpSize;
312 defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
313 "movups", SSEPackedSingle>, TB;
314 defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
315 "movupd", SSEPackedDouble, 0>, TB, OpSize;
317 let isAsmParserOnly = 1 in {
318 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
319 "movaps\t{$src, $dst|$dst, $src}",
320 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
321 def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
322 "movapd\t{$src, $dst|$dst, $src}",
323 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
324 def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
325 "movups\t{$src, $dst|$dst, $src}",
326 [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
327 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
328 "movupd\t{$src, $dst|$dst, $src}",
329 [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
330 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
331 "movaps\t{$src, $dst|$dst, $src}",
332 [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
333 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
334 "movapd\t{$src, $dst|$dst, $src}",
335 [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
336 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
337 "movups\t{$src, $dst|$dst, $src}",
338 [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
339 def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
340 "movupd\t{$src, $dst|$dst, $src}",
341 [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
344 def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
345 def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
346 (VMOVUPSYmr addr:$dst, VR256:$src)>;
348 def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
349 def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
350 (VMOVUPDYmr addr:$dst, VR256:$src)>;
352 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
353 "movaps\t{$src, $dst|$dst, $src}",
354 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
355 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
356 "movapd\t{$src, $dst|$dst, $src}",
357 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
358 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
359 "movups\t{$src, $dst|$dst, $src}",
360 [(store (v4f32 VR128:$src), addr:$dst)]>;
361 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
362 "movupd\t{$src, $dst|$dst, $src}",
363 [(store (v2f64 VR128:$src), addr:$dst)]>;
365 // Intrinsic forms of MOVUPS/D load and store
366 let isAsmParserOnly = 1 in {
367 let canFoldAsLoad = 1, isReMaterializable = 1 in
368 def VMOVUPSrm_Int : VPSI<0x10, MRMSrcMem, (outs VR128:$dst),
370 "movups\t{$src, $dst|$dst, $src}",
371 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>, VEX;
372 def VMOVUPDrm_Int : VPDI<0x10, MRMSrcMem, (outs VR128:$dst),
374 "movupd\t{$src, $dst|$dst, $src}",
375 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>, VEX;
376 def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
377 (ins f128mem:$dst, VR128:$src),
378 "movups\t{$src, $dst|$dst, $src}",
379 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
380 def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
381 (ins f128mem:$dst, VR128:$src),
382 "movupd\t{$src, $dst|$dst, $src}",
383 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
385 let canFoldAsLoad = 1, isReMaterializable = 1 in
386 def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
387 "movups\t{$src, $dst|$dst, $src}",
388 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
389 def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
390 "movupd\t{$src, $dst|$dst, $src}",
391 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
393 def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
394 "movups\t{$src, $dst|$dst, $src}",
395 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
396 def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
397 "movupd\t{$src, $dst|$dst, $src}",
398 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
400 // Move Low/High packed floating point values
401 multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
402 PatFrag mov_frag, string base_opc,
404 def PSrm : PI<opc, MRMSrcMem,
405 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
406 !strconcat(!strconcat(base_opc,"s"), asm_opr),
409 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
410 SSEPackedSingle>, TB;
412 def PDrm : PI<opc, MRMSrcMem,
413 (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
414 !strconcat(!strconcat(base_opc,"d"), asm_opr),
415 [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
416 (scalar_to_vector (loadf64 addr:$src2)))))],
417 SSEPackedDouble>, TB, OpSize;
420 let isAsmParserOnly = 1, AddedComplexity = 20 in {
421 defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
422 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
423 defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
424 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
426 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
427 defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
428 "\t{$src2, $dst|$dst, $src2}">;
429 defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
430 "\t{$src2, $dst|$dst, $src2}">;
433 let isAsmParserOnly = 1 in {
434 def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
435 "movlps\t{$src, $dst|$dst, $src}",
436 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
437 (iPTR 0))), addr:$dst)]>, VEX;
438 def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
439 "movlpd\t{$src, $dst|$dst, $src}",
440 [(store (f64 (vector_extract (v2f64 VR128:$src),
441 (iPTR 0))), addr:$dst)]>, VEX;
443 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
444 "movlps\t{$src, $dst|$dst, $src}",
445 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
446 (iPTR 0))), addr:$dst)]>;
447 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
448 "movlpd\t{$src, $dst|$dst, $src}",
449 [(store (f64 (vector_extract (v2f64 VR128:$src),
450 (iPTR 0))), addr:$dst)]>;
452 // v2f64 extract element 1 is always custom lowered to unpack high to low
453 // and extract element 0 so the non-store version isn't too horrible.
454 let isAsmParserOnly = 1 in {
455 def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
456 "movhps\t{$src, $dst|$dst, $src}",
457 [(store (f64 (vector_extract
458 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
459 (undef)), (iPTR 0))), addr:$dst)]>,
461 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
462 "movhpd\t{$src, $dst|$dst, $src}",
463 [(store (f64 (vector_extract
464 (v2f64 (unpckh VR128:$src, (undef))),
465 (iPTR 0))), addr:$dst)]>,
468 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
469 "movhps\t{$src, $dst|$dst, $src}",
470 [(store (f64 (vector_extract
471 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
472 (undef)), (iPTR 0))), addr:$dst)]>;
473 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
474 "movhpd\t{$src, $dst|$dst, $src}",
475 [(store (f64 (vector_extract
476 (v2f64 (unpckh VR128:$src, (undef))),
477 (iPTR 0))), addr:$dst)]>;
479 let isAsmParserOnly = 1, AddedComplexity = 20 in {
480 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
481 (ins VR128:$src1, VR128:$src2),
482 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
484 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
486 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
487 (ins VR128:$src1, VR128:$src2),
488 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
490 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
493 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
494 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
495 (ins VR128:$src1, VR128:$src2),
496 "movlhps\t{$src2, $dst|$dst, $src2}",
498 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
499 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
500 (ins VR128:$src1, VR128:$src2),
501 "movhlps\t{$src2, $dst|$dst, $src2}",
503 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
506 def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
507 (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
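// A descriptive note on the pattern above: movlhps merges the low 64 bits of
// its second operand into the high half of the first, so when that operand is
// a zero-extending 64-bit load (X86vzload) the combination should be
// equivalent to a single MOVHPS load from memory.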
508 let AddedComplexity = 20 in {
509 def : Pat<(v4f32 (movddup VR128:$src, (undef))),
510 (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
511 def : Pat<(v2i64 (movddup VR128:$src, (undef))),
512 (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
515 //===----------------------------------------------------------------------===//
516 // SSE 1 & 2 - Conversion Instructions
517 //===----------------------------------------------------------------------===//
519 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
520 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
522 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
523 [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
524 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
525 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
528 multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
529 X86MemOperand x86memop, string asm> {
530 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
532 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
536 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
537 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
538 string asm, Domain d> {
539 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
540 [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
541 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
542 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
545 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
546 X86MemOperand x86memop, string asm> {
547 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
548 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
549 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
550 (ins DstRC:$src1, x86memop:$src),
551 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
554 let isAsmParserOnly = 1 in {
555 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
556 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
557 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
558 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
560 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
561 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
562 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
563 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
566 // The assembler can recognize rr 64-bit instructions by seeing an rxx
567 // register, but the same isn't true when only memory operands are used;
568 // provide other "l" and "q" assembly forms to address this explicitly
569 // where appropriate to do so.
570 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
572 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
574 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
576 defm VCVTSI2SDL : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
578 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
582 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
583 "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
584 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
585 "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
586 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
587 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
588 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
589 "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
590 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
591 "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
592 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
593 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
594 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
595 "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
596 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
597 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
599 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
600 // and/or XMM operand(s).
601 multiclass sse12_cvt_pint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
602 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
603 string asm, Domain d> {
604 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
605 [(set DstRC:$dst, (Int SrcRC:$src))], d>;
606 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
607 [(set DstRC:$dst, (Int (ld_frag addr:$src)))], d>;
610 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
611 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
613 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
614 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
615 [(set DstRC:$dst, (Int SrcRC:$src))]>;
616 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
617 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
618 [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
621 multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC,
622 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
623 PatFrag ld_frag, string asm, Domain d> {
624 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
625 asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))], d>;
626 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst),
627 (ins DstRC:$src1, x86memop:$src2), asm,
628 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))], d>;
631 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
632 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
633 PatFrag ld_frag, string asm, bit Is2Addr = 1> {
634 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
636 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
637 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
638 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
639 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
640 (ins DstRC:$src1, x86memop:$src2),
642 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
643 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
644 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
647 let isAsmParserOnly = 1 in {
648 defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
649 f32mem, load, "cvtss2si">, XS, VEX;
650 defm Int_VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
651 int_x86_sse_cvtss2si64, f32mem, load, "cvtss2si">,
653 defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
654 f128mem, load, "cvtsd2si">, XD, VEX;
655 defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
656 int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
659 // FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_
660 // Get rid of this hack or rename the intrinsics; there are several
661 // instructions that only match with the intrinsic form, so why create duplicates
662 // just to let them be recognized by the assembler?
663 defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
664 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
665 defm VCVTSD2SI64 : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
666 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
668 defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
669 f32mem, load, "cvtss2si">, XS;
670 defm Int_CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
671 f32mem, load, "cvtss2si{q}">, XS, REX_W;
672 defm Int_CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
673 f128mem, load, "cvtsd2si">, XD;
674 defm Int_CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
675 f128mem, load, "cvtsd2si">, XD, REX_W;
677 defm CVTSD2SI64 : sse12_cvt_s_np<0x2D, VR128, GR64, f64mem, "cvtsd2si{q}">, XD,
680 let isAsmParserOnly = 1 in {
681 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
682 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
683 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
684 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
686 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
687 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
688 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
689 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
693 let Constraints = "$src1 = $dst" in {
694 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
695 int_x86_sse_cvtsi2ss, i32mem, loadi32,
697 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
698 int_x86_sse_cvtsi642ss, i64mem, loadi64,
699 "cvtsi2ss{q}">, XS, REX_W;
700 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
701 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
703 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
704 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
705 "cvtsi2sd">, XD, REX_W;
708 // Instructions below don't have an AVX form.
709 defm Int_CVTPS2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi,
710 f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}",
711 SSEPackedSingle>, TB;
712 defm Int_CVTPD2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtpd2pi,
713 f128mem, memop, "cvtpd2pi\t{$src, $dst|$dst, $src}",
714 SSEPackedDouble>, TB, OpSize;
715 defm Int_CVTTPS2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttps2pi,
716 f64mem, load, "cvttps2pi\t{$src, $dst|$dst, $src}",
717 SSEPackedSingle>, TB;
718 defm Int_CVTTPD2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttpd2pi,
719 f128mem, memop, "cvttpd2pi\t{$src, $dst|$dst, $src}",
720 SSEPackedDouble>, TB, OpSize;
721 defm Int_CVTPI2PD : sse12_cvt_pint<0x2A, VR64, VR128, int_x86_sse_cvtpi2pd,
722 i64mem, load, "cvtpi2pd\t{$src, $dst|$dst, $src}",
723 SSEPackedDouble>, TB, OpSize;
724 let Constraints = "$src1 = $dst" in {
725 defm Int_CVTPI2PS : sse12_cvt_pint_3addr<0x2A, VR64, VR128,
726 int_x86_sse_cvtpi2ps,
727 i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
728 SSEPackedSingle>, TB;
733 // Aliases for intrinsics
734 let isAsmParserOnly = 1 in {
735 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
736 f32mem, load, "cvttss2si">, XS, VEX;
737 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
738 int_x86_sse_cvttss2si64, f32mem, load,
739 "cvttss2si">, XS, VEX, VEX_W;
740 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
741 f128mem, load, "cvttsd2si">, XD, VEX;
742 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
743 int_x86_sse2_cvttsd2si64, f128mem, load,
744 "cvttss2si">, XD, VEX, VEX_W;
746 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
747 f32mem, load, "cvttss2si">, XS;
748 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
749 int_x86_sse_cvttss2si64, f32mem, load,
750 "cvttss2si{q}">, XS, REX_W;
751 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
752 f128mem, load, "cvttsd2si">, XD;
753 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
754 int_x86_sse2_cvttsd2si64, f128mem, load,
755 "cvttss2si{q}">, XD, REX_W;
757 let isAsmParserOnly = 1, Pattern = []<dag> in {
758 defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
759 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
760 defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
761 "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
763 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
764 "cvtdq2ps\t{$src, $dst|$dst, $src}",
765 SSEPackedSingle>, TB, VEX;
766 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
767 "cvtdq2ps\t{$src, $dst|$dst, $src}",
768 SSEPackedSingle>, TB, VEX;
770 let Pattern = []<dag> in {
771 defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
772 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
773 defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
774 "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
775 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
776 "cvtdq2ps\t{$src, $dst|$dst, $src}",
777 SSEPackedSingle>, TB; /* PD SSE3 form is available */
782 // Convert scalar double to scalar single
783 let isAsmParserOnly = 1 in {
784 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
785 (ins FR64:$src1, FR64:$src2),
786 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
788 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
789 (ins FR64:$src1, f64mem:$src2),
790 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
791 []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
793 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
794 "cvtsd2ss\t{$src, $dst|$dst, $src}",
795 [(set FR32:$dst, (fround FR64:$src))]>;
796 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
797 "cvtsd2ss\t{$src, $dst|$dst, $src}",
798 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
799 Requires<[HasSSE2, OptForSize]>;
801 let isAsmParserOnly = 1 in
802 defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
803 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
805 let Constraints = "$src1 = $dst" in
806 defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
807 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;
809 // Convert scalar single to scalar double
810 let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
811 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
812 (ins FR32:$src1, FR32:$src2),
813 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
814 []>, XS, Requires<[HasAVX]>, VEX_4V;
815 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
816 (ins FR32:$src1, f32mem:$src2),
817 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
818 []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;
820 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
821 "cvtss2sd\t{$src, $dst|$dst, $src}",
822 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
824 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
825 "cvtss2sd\t{$src, $dst|$dst, $src}",
826 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
827 Requires<[HasSSE2, OptForSize]>;
829 let isAsmParserOnly = 1 in {
830 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
831 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
832 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
833 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
834 VR128:$src2))]>, XS, VEX_4V,
836 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
837 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
838 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
839 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
840 (load addr:$src2)))]>, XS, VEX_4V,
843 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
844 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
845 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
846 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
847 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
850 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
851 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
852 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
853 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
854 (load addr:$src2)))]>, XS,
858 def : Pat<(extloadf32 addr:$src),
859 (CVTSS2SDrr (MOVSSrm addr:$src))>,
860 Requires<[HasSSE2, OptForSpeed]>;
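// A hedged note on the pattern above: when optimizing for speed, the f32->f64
// extending load is split into MOVSSrm plus the register form CVTSS2SDrr,
// presumably because the memory form of cvtss2sd merges into only the low
// bits of its destination and therefore carries a partial-register dependency.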
862 // Convert doubleword to packed single/double fp
863 let isAsmParserOnly = 1 in { // SSE2 instructions without OpSize prefix
864 def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
865 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
866 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
867 TB, VEX, Requires<[HasAVX]>;
868 def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
869 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
870 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
871 (bitconvert (memopv2i64 addr:$src))))]>,
872 TB, VEX, Requires<[HasAVX]>;
874 def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
875 "cvtdq2ps\t{$src, $dst|$dst, $src}",
876 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
877 TB, Requires<[HasSSE2]>;
878 def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
879 "cvtdq2ps\t{$src, $dst|$dst, $src}",
880 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
881 (bitconvert (memopv2i64 addr:$src))))]>,
882 TB, Requires<[HasSSE2]>;
884 // FIXME: why is the non-intrinsic version described as SSE3?
885 let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
886 def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
887 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
888 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
889 XS, VEX, Requires<[HasAVX]>;
890 def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
891 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
892 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
893 (bitconvert (memopv2i64 addr:$src))))]>,
894 XS, VEX, Requires<[HasAVX]>;
896 def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
897 "cvtdq2pd\t{$src, $dst|$dst, $src}",
898 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
899 XS, Requires<[HasSSE2]>;
900 def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
901 "cvtdq2pd\t{$src, $dst|$dst, $src}",
902 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
903 (bitconvert (memopv2i64 addr:$src))))]>,
904 XS, Requires<[HasSSE2]>;
907 // Convert packed single/double fp to doubleword
908 let isAsmParserOnly = 1 in {
909 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
910 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
911 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
912 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
913 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
914 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
915 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
916 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
918 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
919 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
920 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
921 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
923 let isAsmParserOnly = 1 in {
924 def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
925 "cvtps2dq\t{$src, $dst|$dst, $src}",
926 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
928 def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
930 "cvtps2dq\t{$src, $dst|$dst, $src}",
931 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
932 (memop addr:$src)))]>, VEX;
934 def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
935 "cvtps2dq\t{$src, $dst|$dst, $src}",
936 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
937 def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
938 "cvtps2dq\t{$src, $dst|$dst, $src}",
939 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
940 (memop addr:$src)))]>;
942 let isAsmParserOnly = 1 in { // SSE2 packed instructions with XD prefix
943 def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
944 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
945 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
946 XD, VEX, Requires<[HasAVX]>;
947 def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
948 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
949 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
950 (memop addr:$src)))]>,
951 XD, VEX, Requires<[HasAVX]>;
953 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
954 "cvtpd2dq\t{$src, $dst|$dst, $src}",
955 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
956 XD, Requires<[HasSSE2]>;
957 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
958 "cvtpd2dq\t{$src, $dst|$dst, $src}",
959 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
960 (memop addr:$src)))]>,
961 XD, Requires<[HasSSE2]>;
964 // Convert packed single/double fp to doubleword, with truncation
965 let isAsmParserOnly = 1 in { // SSE2 packed instructions with XS prefix
966 def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
967 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
968 def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
969 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
970 def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
971 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
972 def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
973 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
975 def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
976 "cvttps2dq\t{$src, $dst|$dst, $src}", []>;
977 def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
978 "cvttps2dq\t{$src, $dst|$dst, $src}", []>;
981 let isAsmParserOnly = 1 in {
982 def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
983 "vcvttps2dq\t{$src, $dst|$dst, $src}",
985 (int_x86_sse2_cvttps2dq VR128:$src))]>,
986 XS, VEX, Requires<[HasAVX]>;
987 def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
988 "vcvttps2dq\t{$src, $dst|$dst, $src}",
989 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
990 (memop addr:$src)))]>,
991 XS, VEX, Requires<[HasAVX]>;
993 def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
994 "cvttps2dq\t{$src, $dst|$dst, $src}",
996 (int_x86_sse2_cvttps2dq VR128:$src))]>,
997 XS, Requires<[HasSSE2]>;
998 def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
999 "cvttps2dq\t{$src, $dst|$dst, $src}",
1000 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
1001 (memop addr:$src)))]>,
1002 XS, Requires<[HasSSE2]>;
1004 let isAsmParserOnly = 1 in {
1005 def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
1007 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1008 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
1010 def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
1012 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1013 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1014 (memop addr:$src)))]>, VEX;
1016 def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1017 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1018 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
1019 def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
1020 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1021 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1022 (memop addr:$src)))]>;
1024 let isAsmParserOnly = 1 in {
1025 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1026 // register, but the same isn't true when using memory operands instead.
1027 // Provide other assembly rr and rm forms to address this explicitly.
1028 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1029 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1030 def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1031 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1034 def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1035 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
1036 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1037 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
1040 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1041 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
1042 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1043 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
1046 // Convert packed single to packed double
1047 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
1048 // SSE2 instructions without OpSize prefix
1049 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1050 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
1051 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1052 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
1053 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
1054 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
1055 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
1056 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
1058 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1059 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
1060 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1061 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
1063 let isAsmParserOnly = 1 in {
1064 def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1065 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1066 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1067 VEX, Requires<[HasAVX]>;
1068 def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1069 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1070 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1071 (load addr:$src)))]>,
1072 VEX, Requires<[HasAVX]>;
1074 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1075 "cvtps2pd\t{$src, $dst|$dst, $src}",
1076 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1077 TB, Requires<[HasSSE2]>;
1078 def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1079 "cvtps2pd\t{$src, $dst|$dst, $src}",
1080 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1081 (load addr:$src)))]>,
1082 TB, Requires<[HasSSE2]>;
1084 // Convert packed double to packed single
1085 let isAsmParserOnly = 1 in {
1086 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1087 // register, but the same isn't true when using memory operands instead.
1088 // Provide other assembly rr and rm forms to address this explicitly.
1089 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1090 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1091 def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1092 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1095 def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1096 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1097 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1098 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1101 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1102 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
1103 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1104 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
1106 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1107 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1108 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1109 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1112 let isAsmParserOnly = 1 in {
1113 def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1114 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1115 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1116 def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
1118 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1119 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1120 (memop addr:$src)))]>;
1122 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1123 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1124 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1125 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1126 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1127 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1128 (memop addr:$src)))]>;
1130 // AVX 256-bit register conversion intrinsics
1131 // FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
1132 // whenever possible to avoid declaring two versions of each one.
1133 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
1134 (VCVTDQ2PSYrr VR256:$src)>;
1135 def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
1136 (VCVTDQ2PSYrm addr:$src)>;
1138 def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
1139 (VCVTPD2PSYrr VR256:$src)>;
1140 def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
1141 (VCVTPD2PSYrm addr:$src)>;
1143 def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
1144 (VCVTPS2DQYrr VR256:$src)>;
1145 def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
1146 (VCVTPS2DQYrm addr:$src)>;
1148 def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
1149 (VCVTPS2PDYrr VR128:$src)>;
1150 def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
1151 (VCVTPS2PDYrm addr:$src)>;
1153 def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
1154 (VCVTTPD2DQYrr VR256:$src)>;
1155 def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
1156 (VCVTTPD2DQYrm addr:$src)>;
1158 def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
1159 (VCVTTPS2DQYrr VR256:$src)>;
1160 def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
1161 (VCVTTPS2DQYrm addr:$src)>;
1163 //===----------------------------------------------------------------------===//
1164 // SSE 1 & 2 - Compare Instructions
1165 //===----------------------------------------------------------------------===//
1167 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
1168 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
1169 string asm, string asm_alt> {
1170 def rr : SIi8<0xC2, MRMSrcReg,
1171 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
1174 def rm : SIi8<0xC2, MRMSrcMem,
1175 (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
1177 // Accept explicit immediate argument form instead of comparison code.
1178 let isAsmParserOnly = 1 in {
1179 def rr_alt : SIi8<0xC2, MRMSrcReg,
1180 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1183 def rm_alt : SIi8<0xC2, MRMSrcMem,
1184 (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
1189 let neverHasSideEffects = 1, isAsmParserOnly = 1 in {
1190 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
1191 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1192 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1194 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
1195 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1196 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1200 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1201 defm CMPSS : sse12_cmp_scalar<FR32, f32mem,
1202 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
1203 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}">, XS;
1204 defm CMPSD : sse12_cmp_scalar<FR64, f64mem,
1205 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
1206 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}">, XD;
1209 multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
1210 Intrinsic Int, string asm> {
1211 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1212 (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
1213 [(set VR128:$dst, (Int VR128:$src1,
1214 VR128:$src, imm:$cc))]>;
1215 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1216 (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
1217 [(set VR128:$dst, (Int VR128:$src1,
1218 (load addr:$src), imm:$cc))]>;
1221 // Aliases to match intrinsics which expect XMM operand(s).
1222 let isAsmParserOnly = 1 in {
1223 defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1224 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
1226 defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1227 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
1230 let Constraints = "$src1 = $dst" in {
1231 defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1232 "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
1233 defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1234 "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
1238 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
1239 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1240 ValueType vt, X86MemOperand x86memop,
1241 PatFrag ld_frag, string OpcodeStr, Domain d> {
1242 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1243 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1244 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
1245 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1246 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1247 [(set EFLAGS, (OpNode (vt RC:$src1),
1248 (ld_frag addr:$src2)))], d>;
1251 let Defs = [EFLAGS] in {
1252 let isAsmParserOnly = 1 in {
1253 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1254 "ucomiss", SSEPackedSingle>, VEX;
1255 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1256 "ucomisd", SSEPackedDouble>, OpSize, VEX;
1257 let Pattern = []<dag> in {
1258 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1259 "comiss", SSEPackedSingle>, VEX;
1260 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1261 "comisd", SSEPackedDouble>, OpSize, VEX;
1264 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1265 load, "ucomiss", SSEPackedSingle>, VEX;
1266 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1267 load, "ucomisd", SSEPackedDouble>, OpSize, VEX;
1269 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
1270 load, "comiss", SSEPackedSingle>, VEX;
1271 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
1272 load, "comisd", SSEPackedDouble>, OpSize, VEX;
1274 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1275 "ucomiss", SSEPackedSingle>, TB;
1276 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1277 "ucomisd", SSEPackedDouble>, TB, OpSize;
1279 let Pattern = []<dag> in {
1280 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1281 "comiss", SSEPackedSingle>, TB;
1282 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1283 "comisd", SSEPackedDouble>, TB, OpSize;
1286 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1287 load, "ucomiss", SSEPackedSingle>, TB;
1288 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1289 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
1291 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
1292 "comiss", SSEPackedSingle>, TB;
1293 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
1294 "comisd", SSEPackedDouble>, TB, OpSize;
1295 } // Defs = [EFLAGS]
1297 // sse12_cmp_packed - SSE 1 & 2 compare packed instructions
1298 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1299 Intrinsic Int, string asm, string asm_alt,
1301 def rri : PIi8<0xC2, MRMSrcReg,
1302 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1303 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1304 def rmi : PIi8<0xC2, MRMSrcMem,
1305 (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
1306 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1307 // Accept explicit immediate argument form instead of comparison code.
1308 let isAsmParserOnly = 1 in {
1309 def rri_alt : PIi8<0xC2, MRMSrcReg,
1310 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1312 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1313 (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
1318 let isAsmParserOnly = 1 in {
1319 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1320 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1321 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1322 SSEPackedSingle>, VEX_4V;
1323 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1324 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1325 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1326 SSEPackedDouble>, OpSize, VEX_4V;
1327 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
1328 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1329 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1330 SSEPackedSingle>, VEX_4V;
1331 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
1332 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1333 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1334 SSEPackedDouble>, OpSize, VEX_4V;
1336 let Constraints = "$src1 = $dst" in {
1337 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1338 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1339 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1340 SSEPackedSingle>, TB;
1341 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1342 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1343 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1344 SSEPackedDouble>, TB, OpSize;
1347 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1348 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1349 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1350 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1351 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1352 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1353 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1354 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
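// Illustrative note (added commentary, not from the original file): the
// rri_alt/rmi_alt forms above carry no selection patterns; they let the
// assembler accept an explicit immediate instead of a predicate mnemonic.
// Assuming the standard predicate encoding (3 = unordered),
//   cmpps $3, %xmm1, %xmm0
// assembles to the same bytes as
//   cmpunordps %xmm1, %xmm0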
1356 //===----------------------------------------------------------------------===//
1357 // SSE 1 & 2 - Shuffle Instructions
1358 //===----------------------------------------------------------------------===//
1360 /// sse12_shuffle - sse 1 & 2 shuffle instructions
1361 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1362 ValueType vt, string asm, PatFrag mem_frag,
1363 Domain d, bit IsConvertibleToThreeAddress = 0> {
1364 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
1365 (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
1366 [(set RC:$dst, (vt (shufp:$src3
1367 RC:$src1, (mem_frag addr:$src2))))], d>;
1368 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1369 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
1370 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
1372 (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
1375 let isAsmParserOnly = 1 in {
1376 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1377 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1378 memopv4f32, SSEPackedSingle>, VEX_4V;
1379 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
1380 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1381 memopv8f32, SSEPackedSingle>, VEX_4V;
1382 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1383 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1384 memopv2f64, SSEPackedDouble>, OpSize, VEX_4V;
1385 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
1386 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1387 memopv4f64, SSEPackedDouble>, OpSize, VEX_4V;
1390 let Constraints = "$src1 = $dst" in {
1391 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1392 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1393 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1395 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1396 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1397 memopv2f64, SSEPackedDouble>, TB, OpSize;
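// Illustrative note (added commentary, not from the original file): the shufps
// immediate selects two elements from $src1 and two from $src2:
//   dst[0] = src1[imm[1:0]], dst[1] = src1[imm[3:2]],
//   dst[2] = src2[imm[5:4]], dst[3] = src2[imm[7:6]]
// so, for example, "shufps $0x1b, %xmm0, %xmm0" (imm = 0b00011011) reverses
// the four single-precision elements of %xmm0.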
1400 //===----------------------------------------------------------------------===//
1401 // SSE 1 & 2 - Unpack Instructions
1402 //===----------------------------------------------------------------------===//
1404 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1405 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1406 PatFrag mem_frag, RegisterClass RC,
1407 X86MemOperand x86memop, string asm,
1409 def rr : PI<opc, MRMSrcReg,
1410 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1412 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1413 def rm : PI<opc, MRMSrcMem,
1414 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1416 (vt (OpNode RC:$src1,
1417 (mem_frag addr:$src2))))], d>;
1420 let AddedComplexity = 10 in {
1421 let isAsmParserOnly = 1 in {
1422 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1423 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1424 SSEPackedSingle>, VEX_4V;
1425 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1426 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1427 SSEPackedDouble>, OpSize, VEX_4V;
1428 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1429 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1430 SSEPackedSingle>, VEX_4V;
1431 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1432 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1433 SSEPackedDouble>, OpSize, VEX_4V;
1435 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1436 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1437 SSEPackedSingle>, VEX_4V;
1438 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1439 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1440 SSEPackedDouble>, OpSize, VEX_4V;
1441 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1442 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1443 SSEPackedSingle>, VEX_4V;
1444 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1445 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1446 SSEPackedDouble>, OpSize, VEX_4V;
1449 let Constraints = "$src1 = $dst" in {
1450 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1451 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1452 SSEPackedSingle>, TB;
1453 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1454 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1455 SSEPackedDouble>, TB, OpSize;
1456 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1457 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1458 SSEPackedSingle>, TB;
1459 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1460 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1461 SSEPackedDouble>, TB, OpSize;
1462 } // Constraints = "$src1 = $dst"
1463 } // AddedComplexity
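// Illustrative note (added commentary, not from the original file): with
// a = $src1 and b = $src2 (or the loaded value), the single-precision forms
// interleave as
//   unpcklps: dst = [ a[0], b[0], a[1], b[1] ]
//   unpckhps: dst = [ a[2], b[2], a[3], b[3] ]
// and the double-precision forms do the same on the two 64-bit elements.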
1465 //===----------------------------------------------------------------------===//
1466 // SSE 1 & 2 - Extract Floating-Point Sign mask
1467 //===----------------------------------------------------------------------===//
1469 /// sse12_extr_sign_mask - sse 1 & 2 sign mask extraction
1470 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1472 def rr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1473 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1474 [(set GR32:$dst, (Int RC:$src))], d>;
1478 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1479 SSEPackedSingle>, TB;
1480 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1481 SSEPackedDouble>, TB, OpSize;
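// Illustrative note (added commentary, not from the original file): movmskps
// packs the sign bits of the four single-precision elements into the low four
// bits of the GR32 destination (bit i = sign of element i). For example, with
// %xmm0 = [ -1.0, 2.0, -3.0, 4.0 ], "movmskps %xmm0, %eax" sets %eax to 0b0101.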
1483 let isAsmParserOnly = 1 in {
1484 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1485 "movmskps", SSEPackedSingle>, VEX;
1486 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1487 "movmskpd", SSEPackedDouble>, OpSize,
1489 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
1490 "movmskps", SSEPackedSingle>, VEX;
1491 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
1492 "movmskpd", SSEPackedDouble>, OpSize,
1496 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1497 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1498 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1499 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1501 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1502 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1503 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1504 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1508 //===----------------------------------------------------------------------===//
1509 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1510 //===----------------------------------------------------------------------===//
1512 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1513 // names that start with 'Fs'.
1515 // Alias instructions that map fld0 to pxor for sse.
1516 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1517 canFoldAsLoad = 1 in {
1518 // FIXME: Set encoding to pseudo!
1519 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1520 [(set FR32:$dst, fp32imm0)]>,
1521 Requires<[HasSSE1]>, TB, OpSize;
1522 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1523 [(set FR64:$dst, fpimm0)]>,
1524 Requires<[HasSSE2]>, TB, OpSize;
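// Illustrative note (added commentary, not from the original file): because
// these pseudos are rematerializable and as cheap as a move, returning a
// floating-point zero typically materializes it with an xor-style idiom, e.g.
//   pxor %xmm0, %xmm0
// rather than loading 0.0 from the constant pool.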
1527 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
1528 // bits are disregarded.
1529 let neverHasSideEffects = 1 in {
1530 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1531 "movaps\t{$src, $dst|$dst, $src}", []>;
1532 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1533 "movapd\t{$src, $dst|$dst, $src}", []>;
1536 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1537 // bits are disregarded.
1538 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1539 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1540 "movaps\t{$src, $dst|$dst, $src}",
1541 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
1542 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1543 "movapd\t{$src, $dst|$dst, $src}",
1544 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1547 //===----------------------------------------------------------------------===//
1548 // SSE 1 & 2 - Logical Instructions
1549 //===----------------------------------------------------------------------===//
1551 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
1553 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
1555 let isAsmParserOnly = 1 in {
1556 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
1557 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, VEX_4V;
1559 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
1560 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, OpSize, VEX_4V;
1563 let Constraints = "$src1 = $dst" in {
1564 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
1565 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
1567 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
1568 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
1572 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1573 let mayLoad = 0 in {
1574 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
1575 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
1576 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
1579 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
1580 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
1582 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
1584 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
1585 SDNode OpNode, int HasPat = 0,
1586 list<list<dag>> Pattern = []> {
1587 let isAsmParserOnly = 1, Pattern = []<dag> in {
1588 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1589 !strconcat(OpcodeStr, "ps"), f128mem,
1590 !if(HasPat, Pattern[0], // rr
1591 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1593 !if(HasPat, Pattern[2], // rm
1594 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1595 (memopv2i64 addr:$src2)))]), 0>,
1598 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1599 !strconcat(OpcodeStr, "pd"), f128mem,
1600 !if(HasPat, Pattern[1], // rr
1601 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1604 !if(HasPat, Pattern[3], // rm
1605 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1606 (memopv2i64 addr:$src2)))]), 0>,
1609 let Constraints = "$src1 = $dst" in {
1610 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1611 !strconcat(OpcodeStr, "ps"), f128mem,
1612 !if(HasPat, Pattern[0], // rr
1613 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1615 !if(HasPat, Pattern[2], // rm
1616 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1617 (memopv2i64 addr:$src2)))])>, TB;
1619 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1620 !strconcat(OpcodeStr, "pd"), f128mem,
1621 !if(HasPat, Pattern[1], // rr
1622 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1625 !if(HasPat, Pattern[3], // rm
1626 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1627 (memopv2i64 addr:$src2)))])>,
1632 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
1634 let isAsmParserOnly = 1 in {
1635 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr> {
1636 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
1637 !strconcat(OpcodeStr, "ps"), f256mem, [], [], 0>, VEX_4V;
1639 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
1640 !strconcat(OpcodeStr, "pd"), f256mem, [], [], 0>, OpSize, VEX_4V;
1644 // AVX 256-bit packed logical ops forms
1645 defm VAND : sse12_fp_packed_logical_y<0x54, "and">;
1646 defm VOR : sse12_fp_packed_logical_y<0x56, "or">;
1647 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor">;
1648 let isCommutable = 0 in
1649 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn">;
1651 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
1652 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
1653 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
1654 let isCommutable = 0 in
1655 defm ANDN : sse12_fp_packed_logical<0x55, "andn", undef /* dummy */, 1, [
1657 [(set VR128:$dst, (v2i64 (and (xor VR128:$src1,
1658 (bc_v2i64 (v4i32 immAllOnesV))),
1661 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1662 (bc_v2i64 (v2f64 VR128:$src2))))],
1664 [(set VR128:$dst, (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
1665 (bc_v2i64 (v4i32 immAllOnesV))),
1666 (memopv2i64 addr:$src2))))],
1668 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1669 (memopv2i64 addr:$src2)))]]>;
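// Note (added commentary, not from the original file): andnps/andnpd compute
// (NOT $src1) AND $src2. The four pattern lists above spell that negation as
// (xor $src1, all-ones) in the PS patterns and as vnot in the PD patterns,
// for the rr and rm forms respectively.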
1671 //===----------------------------------------------------------------------===//
1672 // SSE 1 & 2 - Arithmetic Instructions
1673 //===----------------------------------------------------------------------===//
1675 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and vector forms.
1678 /// In addition, we also have a special variant of the scalar form here to
1679 /// represent the associated intrinsic operation. This form is unlike the
1680 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1681 /// and leaves the top elements unmodified (therefore these cannot be commuted).
1683 /// These three forms can each be reg+reg or reg+mem.
1686 /// FIXME: once all 256-bit intrinsics are matched, clean up and refactor those classes.
1688 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
1690 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
1691 OpNode, FR32, f32mem, Is2Addr>, XS;
1692 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
1693 OpNode, FR64, f64mem, Is2Addr>, XD;
1696 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
1698 let mayLoad = 0 in {
1699 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
1700 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
1701 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
1702 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
1706 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
1708 let mayLoad = 0 in {
1709 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
1710 v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
1711 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
1712 v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
1716 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
1718 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1719 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
1720 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1721 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
1724 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
1726 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1727 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
1728 SSEPackedSingle, Is2Addr>, TB;
1730 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1731 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
1732 SSEPackedDouble, Is2Addr>, TB, OpSize;
1735 multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
1736 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1737 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
1738 SSEPackedSingle, 0>, TB;
1740 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1741 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
1742 SSEPackedDouble, 0>, TB, OpSize;
1745 // Binary Arithmetic instructions
1746 let isAsmParserOnly = 1 in {
1747 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
1748 basic_sse12_fp_binop_s_int<0x58, "add", 0>,
1749 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
1750 basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
1751 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
1752 basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
1753 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
1754 basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
1756 let isCommutable = 0 in {
1757 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
1758 basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
1759 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
1760 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
1761 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
1762 basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
1763 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
1764 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
1765 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
1766 basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
1767 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
1768 basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
1769 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
1770 basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
1771 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
1772 basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
1773 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
1774 basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
1775 basic_sse12_fp_binop_p_y_int<0x5D, "min">,
1776 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
1780 let Constraints = "$src1 = $dst" in {
1781 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
1782 basic_sse12_fp_binop_p<0x58, "add", fadd>,
1783 basic_sse12_fp_binop_s_int<0x58, "add">;
1784 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
1785 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
1786 basic_sse12_fp_binop_s_int<0x59, "mul">;
1788 let isCommutable = 0 in {
1789 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
1790 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
1791 basic_sse12_fp_binop_s_int<0x5C, "sub">;
1792 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
1793 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
1794 basic_sse12_fp_binop_s_int<0x5E, "div">;
1795 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
1796 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
1797 basic_sse12_fp_binop_s_int<0x5F, "max">,
1798 basic_sse12_fp_binop_p_int<0x5F, "max">;
1799 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
1800 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
1801 basic_sse12_fp_binop_s_int<0x5D, "min">,
1802 basic_sse12_fp_binop_p_int<0x5D, "min">;
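// Illustrative note (added commentary, not from the original file): each defm
// above composes several multiclasses, so for example
//   defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>, ...
// instantiates ADDSSrr/ADDSSrm and ADDSDrr/ADDSDrm from the scalar class,
// ADDPSrr/ADDPSrm and ADDPDrr/ADDPDrm from the packed class, and the
// ADDSSrr_Int/ADDSSrm_Int (and SD) forms from the intrinsic class.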
1807 /// In addition, we also have a special variant of the scalar form here to
1808 /// represent the associated intrinsic operation. This form is unlike the
1809 /// plain scalar form, in that it takes an entire vector (instead of a
1810 /// scalar) and leaves the top elements undefined.
1812 /// And, we have a special variant form for a full-vector intrinsic form.
1814 /// sse1_fp_unop_s - SSE1 unops in scalar form.
1815 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
1816 SDNode OpNode, Intrinsic F32Int> {
1817 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1818 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1819 [(set FR32:$dst, (OpNode FR32:$src))]>;
1820 // For scalar unary operations, fold a load into the operation
1821 // only in OptForSize mode. It eliminates an instruction, but it also
1822 // eliminates a whole-register clobber (the load), so it introduces a
1823 // partial register update condition (see the illustrative note after this multiclass).
1824 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
1825 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1826 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
1827 Requires<[HasSSE1, OptForSize]>;
1828 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1829 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1830 [(set VR128:$dst, (F32Int VR128:$src))]>;
1831 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1832 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1833 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
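// Illustrative note (added commentary, not from the original file): folding
// the load, as in
//   sqrtss (%rdi), %xmm0
// writes only the low 32 bits of %xmm0 and so keeps a dependence on the
// register's previous value, whereas the unfolded sequence
//   movss  (%rdi), %xmm0
//   sqrtss %xmm0, %xmm0
// clobbers the whole register with the load and starts a fresh dependence
// chain; hence SSm folds the load only under OptForSize.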
1836 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
1837 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1838 SDNode OpNode, Intrinsic F32Int> {
1839 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
1840 !strconcat(OpcodeStr,
1841 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1842 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
1843 !strconcat(OpcodeStr,
1844 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1845 []>, XS, Requires<[HasAVX, OptForSize]>;
1846 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1847 !strconcat(OpcodeStr,
1848 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1849 [(set VR128:$dst, (F32Int VR128:$src))]>;
1850 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1851 !strconcat(OpcodeStr,
1852 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1853 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1856 /// sse1_fp_unop_p - SSE1 unops in packed form.
1857 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1858 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1859 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1860 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
1861 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1862 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1863 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
1866 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
1867 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1868 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1869 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1870 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
1871 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1872 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1873 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
1876 /// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
1877 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1878 Intrinsic V4F32Int> {
1879 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1880 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1881 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
1882 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1883 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1884 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
1887 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsics unops in packed forms.
1888 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1889 Intrinsic V4F32Int> {
1890 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1891 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1892 [(set VR256:$dst, (V4F32Int VR256:$src))]>;
1893 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1894 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1895 [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
1898 /// sse2_fp_unop_s - SSE2 unops in scalar form.
1899 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
1900 SDNode OpNode, Intrinsic F64Int> {
1901 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1902 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1903 [(set FR64:$dst, (OpNode FR64:$src))]>;
1904 // See the comments in sse1_fp_unop_s for why this is OptForSize.
1905 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1906 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1907 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
1908 Requires<[HasSSE2, OptForSize]>;
1909 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1910 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1911 [(set VR128:$dst, (F64Int VR128:$src))]>;
1912 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1913 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1914 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1917 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
1918 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1919 SDNode OpNode, Intrinsic F64Int> {
1920 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1921 !strconcat(OpcodeStr,
1922 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1923 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
1924 (ins FR64:$src1, f64mem:$src2),
1925 !strconcat(OpcodeStr,
1926 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1927 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1928 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1929 [(set VR128:$dst, (F64Int VR128:$src))]>;
1930 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1931 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1932 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1935 /// sse2_fp_unop_p - SSE2 unops in vector forms.
1936 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
1938 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1939 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1940 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
1941 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1942 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1943 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
1946 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
1947 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1948 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1949 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1950 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
1951 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1952 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1953 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
1956 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
1957 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1958 Intrinsic V2F64Int> {
1959 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1960 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1961 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
1962 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1963 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1964 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
1967 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
1968 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1969 Intrinsic V2F64Int> {
1970 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1971 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1972 [(set VR256:$dst, (V2F64Int VR256:$src))]>;
1973 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1974 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1975 [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
1978 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
1980 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse_sqrt_ss>,
1981 sse2_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1984 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
1985 sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
1986 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1987 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1988 sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
1989 sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
1990 sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
1991 sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
1994 // Reciprocal approximations. Note that these typically require refinement
1995 // in order to obtain suitable precision.
1996 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt", X86frsqrt,
1997 int_x86_sse_rsqrt_ss>, VEX_4V;
1998 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
1999 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
2000 sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
2001 sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
2003 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp", X86frcp, int_x86_sse_rcp_ss>,
2005 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
2006 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
2007 sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
2008 sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
2012 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
2013 sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
2014 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
2015 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
2016 sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
2017 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
2019 // Reciprocal approximations. Note that these typically require refinement
2020 // in order to obtain suitable precision.
2021 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
2022 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
2023 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
2024 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
2025 sse1_fp_unop_p<0x53, "rcp", X86frcp>,
2026 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
2028 // There is no f64 version of the reciprocal approximation instructions.
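// Note (added commentary, not from the original file): the usual
// Newton-Raphson refinement steps for these approximations are
//   rcpps:   x1 = x0 * (2 - a * x0)
//   rsqrtps: x1 = x0 * (1.5 - 0.5 * a * x0 * x0)
// where a is the input and x0 the hardware estimate; one step roughly doubles
// the number of correct bits.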
2030 //===----------------------------------------------------------------------===//
2031 // SSE 1 & 2 - Non-temporal stores
2032 //===----------------------------------------------------------------------===//
2034 let isAsmParserOnly = 1 in {
2035 def VMOVNTPSmr_Int : VPSI<0x2B, MRMDestMem, (outs),
2036 (ins i128mem:$dst, VR128:$src),
2037 "movntps\t{$src, $dst|$dst, $src}",
2038 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>, VEX;
2039 def VMOVNTPDmr_Int : VPDI<0x2B, MRMDestMem, (outs),
2040 (ins i128mem:$dst, VR128:$src),
2041 "movntpd\t{$src, $dst|$dst, $src}",
2042 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>, VEX;
2044 let ExeDomain = SSEPackedInt in
2045 def VMOVNTDQmr_Int : VPDI<0xE7, MRMDestMem, (outs),
2046 (ins f128mem:$dst, VR128:$src),
2047 "movntdq\t{$src, $dst|$dst, $src}",
2048 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>, VEX;
2050 let AddedComplexity = 400 in { // Prefer non-temporal versions
2051 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
2052 (ins f128mem:$dst, VR128:$src),
2053 "movntps\t{$src, $dst|$dst, $src}",
2054 [(alignednontemporalstore (v4f32 VR128:$src),
2056 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
2057 (ins f128mem:$dst, VR128:$src),
2058 "movntpd\t{$src, $dst|$dst, $src}",
2059 [(alignednontemporalstore (v2f64 VR128:$src),
2061 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
2062 (ins f128mem:$dst, VR128:$src),
2063 "movntdq\t{$src, $dst|$dst, $src}",
2064 [(alignednontemporalstore (v2f64 VR128:$src),
2066 let ExeDomain = SSEPackedInt in
2067 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
2068 (ins f128mem:$dst, VR128:$src),
2069 "movntdq\t{$src, $dst|$dst, $src}",
2070 [(alignednontemporalstore (v4f32 VR128:$src),
2073 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
2074 (ins f256mem:$dst, VR256:$src),
2075 "movntps\t{$src, $dst|$dst, $src}",
2076 [(alignednontemporalstore (v8f32 VR256:$src),
2078 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
2079 (ins f256mem:$dst, VR256:$src),
2080 "movntpd\t{$src, $dst|$dst, $src}",
2081 [(alignednontemporalstore (v4f64 VR256:$src),
2083 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
2084 (ins f256mem:$dst, VR256:$src),
2085 "movntdq\t{$src, $dst|$dst, $src}",
2086 [(alignednontemporalstore (v4f64 VR256:$src),
2088 let ExeDomain = SSEPackedInt in
2089 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
2090 (ins f256mem:$dst, VR256:$src),
2091 "movntdq\t{$src, $dst|$dst, $src}",
2092 [(alignednontemporalstore (v8f32 VR256:$src),
2097 def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
2098 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
2099 def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
2100 (VMOVNTPDYmr addr:$dst, VR256:$src)>;
2101 def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
2102 (VMOVNTPSYmr addr:$dst, VR256:$src)>;
2104 def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2105 "movntps\t{$src, $dst|$dst, $src}",
2106 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
2107 def MOVNTPDmr_Int : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2108 "movntpd\t{$src, $dst|$dst, $src}",
2109 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
2111 let ExeDomain = SSEPackedInt in
2112 def MOVNTDQmr_Int : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2113 "movntdq\t{$src, $dst|$dst, $src}",
2114 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
2116 let AddedComplexity = 400 in { // Prefer non-temporal versions
2117 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2118 "movntps\t{$src, $dst|$dst, $src}",
2119 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2120 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2121 "movntpd\t{$src, $dst|$dst, $src}",
2122 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
2124 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2125 "movntdq\t{$src, $dst|$dst, $src}",
2126 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2128 let ExeDomain = SSEPackedInt in
2129 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2130 "movntdq\t{$src, $dst|$dst, $src}",
2131 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2133 // There is no AVX form for instructions below this point
2134 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2135 "movnti\t{$src, $dst|$dst, $src}",
2136 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2137 TB, Requires<[HasSSE2]>;
2139 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2140 "movnti\t{$src, $dst|$dst, $src}",
2141 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2142 TB, Requires<[HasSSE2]>;
2145 def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2146 "movnti\t{$src, $dst|$dst, $src}",
2147 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
2148 TB, Requires<[HasSSE2]>;
2150 //===----------------------------------------------------------------------===//
2151 // SSE 1 & 2 - Misc Instructions (No AVX form)
2152 //===----------------------------------------------------------------------===//
2154 // Prefetch intrinsic.
2155 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2156 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
2157 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2158 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
2159 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2160 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
2161 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2162 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
2164 // Load, store, and memory fence
2165 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2166 TB, Requires<[HasSSE1]>;
2167 def : Pat<(X86SFence), (SFENCE)>;
2169 // Alias instructions that map zero vector to pxor / xorp* for sse.
2170 // We set canFoldAsLoad because this can be converted to a constant-pool
2171 // load of an all-zeros value if folding it would be beneficial.
2172 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2173 // JIT implementation; it does not expand the instructions below like
2174 // X86MCInstLower does.
2175 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2176 isCodeGenOnly = 1 in {
2177 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2178 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
2179 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2180 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
2181 let ExeDomain = SSEPackedInt in
2182 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2183 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2186 // The same as done above but for AVX. The 128-bit versions are the
2187 // same, but re-encoded. The 256-bit versions do not have a PI form.
2188 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2189 // JIT implementation; it does not expand the instructions below like
2190 // X86MCInstLower does.
2191 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2192 isCodeGenOnly = 1, Predicates = [HasAVX] in {
2193 def AVX_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2194 [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
2195 def AVX_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2196 [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
2197 def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2198 [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
2199 def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2200 [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
2201 let ExeDomain = SSEPackedInt in
2202 def AVX_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2203 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2206 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
2207 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
2208 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
2210 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2211 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
2213 //===----------------------------------------------------------------------===//
2214 // SSE 1 & 2 - Load/Store MXCSR register
2215 //===----------------------------------------------------------------------===//
2217 let isAsmParserOnly = 1 in {
2218 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2219 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2220 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2221 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2224 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2225 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2226 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2227 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2229 //===---------------------------------------------------------------------===//
2230 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2231 //===---------------------------------------------------------------------===//
2233 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2235 let isAsmParserOnly = 1 in {
2236 let neverHasSideEffects = 1 in {
2237 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2238 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2239 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2240 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2242 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2243 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2244 def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2245 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2247 let canFoldAsLoad = 1, mayLoad = 1 in {
2248 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2249 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2250 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2251 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2252 let Predicates = [HasAVX] in {
2253 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2254 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2255 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2256 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2260 let mayStore = 1 in {
2261 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2262 (ins i128mem:$dst, VR128:$src),
2263 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2264 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
2265 (ins i256mem:$dst, VR256:$src),
2266 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2267 let Predicates = [HasAVX] in {
2268 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2269 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2270 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
2271 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2276 let neverHasSideEffects = 1 in
2277 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2278 "movdqa\t{$src, $dst|$dst, $src}", []>;
2280 let canFoldAsLoad = 1, mayLoad = 1 in {
2281 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2282 "movdqa\t{$src, $dst|$dst, $src}",
2283 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2284 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2285 "movdqu\t{$src, $dst|$dst, $src}",
2286 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2287 XS, Requires<[HasSSE2]>;
2290 let mayStore = 1 in {
2291 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2292 "movdqa\t{$src, $dst|$dst, $src}",
2293 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2294 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2295 "movdqu\t{$src, $dst|$dst, $src}",
2296 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2297 XS, Requires<[HasSSE2]>;
2300 // Intrinsic forms of MOVDQU load and store
2301 let isAsmParserOnly = 1 in {
2302 let canFoldAsLoad = 1 in
2303 def VMOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2304 "vmovdqu\t{$src, $dst|$dst, $src}",
2305 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2306 XS, VEX, Requires<[HasAVX]>;
2307 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2308 "vmovdqu\t{$src, $dst|$dst, $src}",
2309 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2310 XS, VEX, Requires<[HasAVX]>;
2313 let canFoldAsLoad = 1 in
2314 def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2315 "movdqu\t{$src, $dst|$dst, $src}",
2316 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2317 XS, Requires<[HasSSE2]>;
2318 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2319 "movdqu\t{$src, $dst|$dst, $src}",
2320 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2321 XS, Requires<[HasSSE2]>;
2323 } // ExeDomain = SSEPackedInt
2325 def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
2326 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
2327 (VMOVDQUYmr addr:$dst, VR256:$src)>;
2329 //===---------------------------------------------------------------------===//
2330 // SSE2 - Packed Integer Arithmetic Instructions
2331 //===---------------------------------------------------------------------===//
2333 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2335 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
2336 bit IsCommutable = 0, bit Is2Addr = 1> {
2337 let isCommutable = IsCommutable in
2338 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2339 (ins VR128:$src1, VR128:$src2),
2341 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2342 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2343 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2344 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2345 (ins VR128:$src1, i128mem:$src2),
2347 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2348 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2349 [(set VR128:$dst, (IntId VR128:$src1,
2350 (bitconvert (memopv2i64 addr:$src2))))]>;
2353 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
2354 string OpcodeStr, Intrinsic IntId,
2355 Intrinsic IntId2, bit Is2Addr = 1> {
2356 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2357 (ins VR128:$src1, VR128:$src2),
2359 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2360 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2361 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2362 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2363 (ins VR128:$src1, i128mem:$src2),
2365 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2366 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2367 [(set VR128:$dst, (IntId VR128:$src1,
2368 (bitconvert (memopv2i64 addr:$src2))))]>;
2369 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
2370 (ins VR128:$src1, i32i8imm:$src2),
2372 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2373 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2374 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
2377 /// PDI_binop_rm - Simple SSE2 binary operator.
2378 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2379 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
2380 let isCommutable = IsCommutable in
2381 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2382 (ins VR128:$src1, VR128:$src2),
2384 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2385 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2386 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
2387 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2388 (ins VR128:$src1, i128mem:$src2),
2390 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2391 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2392 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
2393 (bitconvert (memopv2i64 addr:$src2)))))]>;
2396 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
2398 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
2399 /// to collapse (bitconvert VT to VT) into its operand.
2401 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
2402 bit IsCommutable = 0, bit Is2Addr = 1> {
2403 let isCommutable = IsCommutable in
2404 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2405 (ins VR128:$src1, VR128:$src2),
2407 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2408 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2409 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
2410 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2411 (ins VR128:$src1, i128mem:$src2),
2413 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2414 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2415 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
2418 } // ExeDomain = SSEPackedInt
2420 // 128-bit Integer Arithmetic
2422 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2423 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
2424 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
2425 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
2426 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
2427 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
2428 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
2429 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
2430 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
2431 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
2434 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
2436 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
2438 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
2440 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
2442 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
2444 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
2446 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
2448 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
2450 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
2452 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
2454 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
2456 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
2458 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
2460 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
2462 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
2464 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
2466 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
2468 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
2470 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
2474 let Constraints = "$src1 = $dst" in {
2475 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
2476 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
2477 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
2478 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
2479 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
2480 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
2481 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
2482 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
2483 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
2486 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
2487 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
2488 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
2489 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
2490 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
2491 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
2492 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
2493 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
2494 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
2495 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
2496 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
2497 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
2498 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
2499 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
2500 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
2501 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
2502 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
2503 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
2504 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
2506 } // Constraints = "$src1 = $dst"
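// Illustrative note (added commentary, not from the original file): these
// defms expand to the familiar SSE2 instruction names, e.g.
//   defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
// yields PADDBrr and PADDBrm, whose patterns select the IR-level
// (add <16 x i8> a, b), with the memory form folding a 128-bit load through
// memopv2i64.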
2508 //===---------------------------------------------------------------------===//
2509 // SSE2 - Packed Integer Logical Instructions
2510 //===---------------------------------------------------------------------===//
2512 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2513 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
2514 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
2516 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
2517 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
2519 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
2520 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
2523 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
2524 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
2526 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
2527 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
2529 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
2530 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
2533 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
2534 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
2536 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
2537 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
2540 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
2541 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
2542 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
2544 let ExeDomain = SSEPackedInt in {
2545 let neverHasSideEffects = 1 in {
2546 // 128-bit logical shifts.
2547 def VPSLLDQri : PDIi8<0x73, MRM7r,
2548 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2549 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2551 def VPSRLDQri : PDIi8<0x73, MRM3r,
2552 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2553 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2555 // PSRADQri doesn't exist in SSE[1-3].
2557 def VPANDNrr : PDI<0xDF, MRMSrcReg,
2558 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2559 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2560 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2561 VR128:$src2)))]>, VEX_4V;
2563 def VPANDNrm : PDI<0xDF, MRMSrcMem,
2564 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2565 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2566 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2567 (memopv2i64 addr:$src2))))]>,
2572 let Constraints = "$src1 = $dst" in {
2573 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
2574 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
2575 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
2576 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
2577 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
2578 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
2580 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
2581 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
2582 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
2583 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
2584 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
2585 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
2587 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
2588 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
2589 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
2590 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
2592 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
2593 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
2594 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
2596 let ExeDomain = SSEPackedInt in {
2597 let neverHasSideEffects = 1 in {
2598 // 128-bit logical shifts.
2599 def PSLLDQri : PDIi8<0x73, MRM7r,
2600 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2601 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
2602 def PSRLDQri : PDIi8<0x73, MRM3r,
2603 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2604 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
2605 // PSRADQri doesn't exist in SSE[1-3].
2607 def PANDNrr : PDI<0xDF, MRMSrcReg,
2608 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2609 "pandn\t{$src2, $dst|$dst, $src2}",
2610 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2613 def PANDNrm : PDI<0xDF, MRMSrcMem,
2614 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2615 "pandn\t{$src2, $dst|$dst, $src2}",
2616 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2617 (memopv2i64 addr:$src2))))]>;
2619 } // Constraints = "$src1 = $dst"
2621 let Predicates = [HasAVX] in {
2622 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2623 (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2624 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2625 (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2626 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2627 (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
2628 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2629 (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
2630 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2631 (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2633 // Shift up / down and insert zeros.
2634 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2635 (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2636 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2637 (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2640 let Predicates = [HasSSE2] in {
2641 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2642 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2643 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2644 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2645 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2646 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
2647 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2648 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
2649 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2650 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2652 // Shift up / down and insert zeros.
2653 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2654 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2655 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2656 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2659 //===---------------------------------------------------------------------===//
2660 // SSE2 - Packed Integer Comparison Instructions
2661 //===---------------------------------------------------------------------===//
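// Informal note: pcmpeq*/pcmpgt* compare element-wise and write all-ones (-1)
// into lanes where the predicate holds and zero elsewhere; e.g. pcmpeqb on
// <1,2,3,...> and <1,9,3,...> produces <0xFF,0x00,0xFF,...>.  The X86pcmpeq*/
// X86pcmpgt* patterns further below map those DAG nodes onto these defs.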
2663 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2664 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
2666 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
2668 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
2670 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
2672 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
2674 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
2678 let Constraints = "$src1 = $dst" in {
2679 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
2680 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
2681 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
2682 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
2683 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
2684 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
2685 } // Constraints = "$src1 = $dst"
2687 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2688 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
2689 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2690 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2691 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2692 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2693 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2694 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2695 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2696 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2697 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2698 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2700 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2701 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2702 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2703 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2704 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2705 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2706 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2707 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2708 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2709 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2710 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2711 (PCMPGTDrm VR128:$src1, addr:$src2)>;
2713 //===---------------------------------------------------------------------===//
2714 // SSE2 - Packed Integer Pack Instructions
2715 //===---------------------------------------------------------------------===//
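// Informal note: packsswb/packssdw narrow each element to half its width with
// signed saturation (an i16 value of 300 becomes the i8 value 127), while
// packuswb saturates to the unsigned byte range [0, 255]; the low half of the
// result comes from $src1 and the high half from $src2.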
2717 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2718 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
2720 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
2722 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
2726 let Constraints = "$src1 = $dst" in {
2727 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2728 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2729 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2730 } // Constraints = "$src1 = $dst"
2732 //===---------------------------------------------------------------------===//
2733 // SSE2 - Packed Integer Shuffle Instructions
2734 //===---------------------------------------------------------------------===//
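// Informal note: the pshuf* immediate packs four 2-bit source-lane selectors,
// bits [1:0] choosing result element 0 and so on; e.g. pshufd with 0x1B
// (0b00011011) reverses the four dwords.  pshufhw/pshuflw apply the same
// scheme to only the high or low four words and copy the other half through.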
2736 let ExeDomain = SSEPackedInt in {
2737 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
2739 def ri : Ii8<0x70, MRMSrcReg,
2740 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2741 !strconcat(OpcodeStr,
2742 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2743 [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
2745 def mi : Ii8<0x70, MRMSrcMem,
2746 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2747 !strconcat(OpcodeStr,
2748 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2749 [(set VR128:$dst, (vt (pshuf_frag:$src2
2750 (bc_frag (memopv2i64 addr:$src1)),
2753 } // ExeDomain = SSEPackedInt
2755 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2756 let AddedComplexity = 5 in
2757 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
2760 // SSE2 with ImmT == Imm8 and XS prefix.
2761 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
2764 // SSE2 with ImmT == Imm8 and XD prefix.
2765 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
2769 let Predicates = [HasSSE2] in {
2770 let AddedComplexity = 5 in
2771 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
2773 // SSE2 with ImmT == Imm8 and XS prefix.
2774 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
2776 // SSE2 with ImmT == Imm8 and XD prefix.
2777 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
2780 //===---------------------------------------------------------------------===//
2781 // SSE2 - Packed Integer Unpack Instructions
2782 //===---------------------------------------------------------------------===//
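// Informal note: punpckl* interleave the low halves of the two sources and
// punpckh* the high halves; e.g. punpcklbw on <a0..a15>, <b0..b15> gives
// <a0,b0,a1,b1,...,a7,b7>.  The bc_frag bitcasts below let a single v2i64
// memory load feed every element width.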
2784 let ExeDomain = SSEPackedInt in {
2785 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
2786 PatFrag unp_frag, PatFrag bc_frag, bit Is2Addr = 1> {
2787 def rr : PDI<opc, MRMSrcReg,
2788 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2790 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2791 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2792 [(set VR128:$dst, (vt (unp_frag VR128:$src1, VR128:$src2)))]>;
2793 def rm : PDI<opc, MRMSrcMem,
2794 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2796 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2797 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2798 [(set VR128:$dst, (unp_frag VR128:$src1,
2799 (bc_frag (memopv2i64
2803 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2804 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, unpckl, bc_v16i8,
2806 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, unpckl, bc_v8i16,
2808 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, unpckl, bc_v4i32,
2811 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2812 /// knew to collapse (bitconvert VT to VT) into its operand.
2813 def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2814 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2815 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2817 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>, VEX_4V;
2818 def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2819 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2820 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2822 (v2i64 (unpckl VR128:$src1,
2823 (memopv2i64 addr:$src2))))]>, VEX_4V;
2825 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, unpckh, bc_v16i8,
2827 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, unpckh, bc_v8i16,
2829 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, unpckh, bc_v4i32,
2832 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2833 /// knew to collapse (bitconvert VT to VT) into its operand.
2834 def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2835 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2836 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2838 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>, VEX_4V;
2839 def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2840 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2841 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2843 (v2i64 (unpckh VR128:$src1,
2844 (memopv2i64 addr:$src2))))]>, VEX_4V;
2847 let Constraints = "$src1 = $dst" in {
2848 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, unpckl, bc_v16i8>;
2849 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, unpckl, bc_v8i16>;
2850 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, unpckl, bc_v4i32>;
2852 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2853 /// knew to collapse (bitconvert VT to VT) into its operand.
2854 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2855 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2856 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2858 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>;
2859 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2860 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2861 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2863 (v2i64 (unpckl VR128:$src1,
2864 (memopv2i64 addr:$src2))))]>;
2866 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, unpckh, bc_v16i8>;
2867 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, unpckh, bc_v8i16>;
2868 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, unpckh, bc_v4i32>;
2870 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2871 /// knew to collapse (bitconvert VT to VT) into its operand.
2872 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2873 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2874 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2876 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>;
2877 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2878 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2879 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2881 (v2i64 (unpckh VR128:$src1,
2882 (memopv2i64 addr:$src2))))]>;
2885 } // ExeDomain = SSEPackedInt
2887 //===---------------------------------------------------------------------===//
2888 // SSE2 - Packed Integer Extract and Insert
2889 //===---------------------------------------------------------------------===//
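// Informal note: pextrw zero-extends the word selected by the immediate into a
// GR32, and pinsrw overwrites the selected word of the vector with the low 16
// bits of a GR32 or with a 16-bit memory operand (hence the extloadi16 in the
// memory pattern below).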
2891 let ExeDomain = SSEPackedInt in {
2892 multiclass sse2_pinsrw<bit Is2Addr = 1> {
2893 def rri : Ii8<0xC4, MRMSrcReg,
2894 (outs VR128:$dst), (ins VR128:$src1,
2895 GR32:$src2, i32i8imm:$src3),
2897 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2898 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2900 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
2901 def rmi : Ii8<0xC4, MRMSrcMem,
2902 (outs VR128:$dst), (ins VR128:$src1,
2903 i16mem:$src2, i32i8imm:$src3),
2905 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2906 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2908 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2913 let isAsmParserOnly = 1, Predicates = [HasAVX] in
2914 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
2915 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2916 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2917 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2918 imm:$src2))]>, OpSize, VEX;
2919 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2920 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2921 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2922 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2926 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2927 defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
2928 def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
2929 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
2930 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2931 []>, OpSize, VEX_4V;
2934 let Constraints = "$src1 = $dst" in
2935 defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
2937 } // ExeDomain = SSEPackedInt
2939 //===---------------------------------------------------------------------===//
2940 // SSE2 - Packed Mask Creation
2941 //===---------------------------------------------------------------------===//
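// Informal note: pmovmskb collects the most-significant bit of each of the 16
// bytes of $src into the low 16 bits of the GR32 destination and zeroes the
// rest, which is what int_x86_sse2_pmovmskb_128 models.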
2943 let ExeDomain = SSEPackedInt in {
2945 let isAsmParserOnly = 1 in {
2946 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2947 "pmovmskb\t{$src, $dst|$dst, $src}",
2948 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
2949 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2950 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
2952 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2953 "pmovmskb\t{$src, $dst|$dst, $src}",
2954 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2956 } // ExeDomain = SSEPackedInt
2958 //===---------------------------------------------------------------------===//
2959 // SSE2 - Conditional Store
2960 //===---------------------------------------------------------------------===//
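// Informal note: maskmovdqu stores only the bytes of $src whose corresponding
// byte in $mask has its most-significant bit set; the store address is
// implicitly (E/R)DI, which is why the patterns below carry EDI or RDI as an
// extra operand.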
2962 let ExeDomain = SSEPackedInt in {
2964 let isAsmParserOnly = 1 in {
2966 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
2967 (ins VR128:$src, VR128:$mask),
2968 "maskmovdqu\t{$mask, $src|$src, $mask}",
2969 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
2971 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
2972 (ins VR128:$src, VR128:$mask),
2973 "maskmovdqu\t{$mask, $src|$src, $mask}",
2974 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
2978 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2979 "maskmovdqu\t{$mask, $src|$src, $mask}",
2980 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
2982 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2983 "maskmovdqu\t{$mask, $src|$src, $mask}",
2984 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
2986 } // ExeDomain = SSEPackedInt
2988 //===---------------------------------------------------------------------===//
2989 // SSE2 - Move Doubleword
2990 //===---------------------------------------------------------------------===//
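// Informal note: movd moves 32 bits between a GR32 and the low element of an
// XMM register; in the GR32-to-XMM direction the remaining 96 bits are zeroed,
// a fact the X86vzmovl patterns later in this section rely on.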
2992 // Move Doubleword Int to Packed Doubleword Int
2993 let isAsmParserOnly = 1 in {
2994 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2995 "movd\t{$src, $dst|$dst, $src}",
2997 (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
2998 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2999 "movd\t{$src, $dst|$dst, $src}",
3001 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
3004 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3005 "movd\t{$src, $dst|$dst, $src}",
3007 (v4i32 (scalar_to_vector GR32:$src)))]>;
3008 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3009 "movd\t{$src, $dst|$dst, $src}",
3011 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
3014 // Move Doubleword Int to Single-Precision Scalar
3015 let isAsmParserOnly = 1 in {
3016 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
3017 "movd\t{$src, $dst|$dst, $src}",
3018 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
3020 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
3021 "movd\t{$src, $dst|$dst, $src}",
3022 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
3025 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
3026 "movd\t{$src, $dst|$dst, $src}",
3027 [(set FR32:$dst, (bitconvert GR32:$src))]>;
3029 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
3030 "movd\t{$src, $dst|$dst, $src}",
3031 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
3033 // Move Packed Doubleword Int (low element) to Doubleword Int
3034 let isAsmParserOnly = 1 in {
3035 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
3036 "movd\t{$src, $dst|$dst, $src}",
3037 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
3039 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
3040 (ins i32mem:$dst, VR128:$src),
3041 "movd\t{$src, $dst|$dst, $src}",
3042 [(store (i32 (vector_extract (v4i32 VR128:$src),
3043 (iPTR 0))), addr:$dst)]>, VEX;
3045 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
3046 "movd\t{$src, $dst|$dst, $src}",
3047 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
3049 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
3050 "movd\t{$src, $dst|$dst, $src}",
3051 [(store (i32 (vector_extract (v4i32 VR128:$src),
3052 (iPTR 0))), addr:$dst)]>;
3054 // Move Single-Precision Scalar to Doubleword Int
3055 let isAsmParserOnly = 1 in {
3056 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3057 "movd\t{$src, $dst|$dst, $src}",
3058 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
3059 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3060 "movd\t{$src, $dst|$dst, $src}",
3061 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
3063 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3064 "movd\t{$src, $dst|$dst, $src}",
3065 [(set GR32:$dst, (bitconvert FR32:$src))]>;
3066 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3067 "movd\t{$src, $dst|$dst, $src}",
3068 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
3070 // movd / movq to an XMM register zero-extends the upper elements
3071 let AddedComplexity = 15, isAsmParserOnly = 1 in {
3072 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3073 "movd\t{$src, $dst|$dst, $src}",
3074 [(set VR128:$dst, (v4i32 (X86vzmovl
3075 (v4i32 (scalar_to_vector GR32:$src)))))]>,
3077 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3078 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3079 [(set VR128:$dst, (v2i64 (X86vzmovl
3080 (v2i64 (scalar_to_vector GR64:$src)))))]>,
3083 let AddedComplexity = 15 in {
3084 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3085 "movd\t{$src, $dst|$dst, $src}",
3086 [(set VR128:$dst, (v4i32 (X86vzmovl
3087 (v4i32 (scalar_to_vector GR32:$src)))))]>;
3088 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3089 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3090 [(set VR128:$dst, (v2i64 (X86vzmovl
3091 (v2i64 (scalar_to_vector GR64:$src)))))]>;
3094 let AddedComplexity = 20 in {
3095 let isAsmParserOnly = 1 in
3096 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3097 "movd\t{$src, $dst|$dst, $src}",
3099 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3100 (loadi32 addr:$src))))))]>,
3102 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3103 "movd\t{$src, $dst|$dst, $src}",
3105 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3106 (loadi32 addr:$src))))))]>;
3108 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
3109 (MOVZDI2PDIrm addr:$src)>;
3110 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3111 (MOVZDI2PDIrm addr:$src)>;
3112 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3113 (MOVZDI2PDIrm addr:$src)>;
3116 //===---------------------------------------------------------------------===//
3117 // SSE2 - Move Quadword
3118 //===---------------------------------------------------------------------===//
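// Informal note: movq loads or stores the low 64 bits of an XMM register; the
// load and register-to-register forms zero the upper 64 bits, which is what
// the X86vzmovl / X86vzload patterns below capture.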
3120 // Move Quadword Int to Packed Quadword Int
3121 let isAsmParserOnly = 1 in
3122 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3123 "vmovq\t{$src, $dst|$dst, $src}",
3125 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3126 VEX, Requires<[HasAVX]>;
3127 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3128 "movq\t{$src, $dst|$dst, $src}",
3130 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3131 Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
3133 // Move Packed Quadword Int to Quadword Int
3134 let isAsmParserOnly = 1 in
3135 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3136 "movq\t{$src, $dst|$dst, $src}",
3137 [(store (i64 (vector_extract (v2i64 VR128:$src),
3138 (iPTR 0))), addr:$dst)]>, VEX;
3139 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3140 "movq\t{$src, $dst|$dst, $src}",
3141 [(store (i64 (vector_extract (v2i64 VR128:$src),
3142 (iPTR 0))), addr:$dst)]>;
3144 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
3145 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
3147 // Store / copy the lower 64 bits of an XMM register.
3148 let isAsmParserOnly = 1 in
3149 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3150 "movq\t{$src, $dst|$dst, $src}",
3151 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
3152 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3153 "movq\t{$src, $dst|$dst, $src}",
3154 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
3156 let AddedComplexity = 20, isAsmParserOnly = 1 in
3157 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3158 "vmovq\t{$src, $dst|$dst, $src}",
3160 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3161 (loadi64 addr:$src))))))]>,
3162 XS, VEX, Requires<[HasAVX]>;
3164 let AddedComplexity = 20 in {
3165 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3166 "movq\t{$src, $dst|$dst, $src}",
3168 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3169 (loadi64 addr:$src))))))]>,
3170 XS, Requires<[HasSSE2]>;
3172 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3173 (MOVZQI2PQIrm addr:$src)>;
3174 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
3175 (MOVZQI2PQIrm addr:$src)>;
3176 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
3179 // Move from XMM to XMM and clear the upper 64 bits. Note: there is a bug in the
3180 // IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
3181 let isAsmParserOnly = 1, AddedComplexity = 15 in
3182 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3183 "vmovq\t{$src, $dst|$dst, $src}",
3184 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3185 XS, VEX, Requires<[HasAVX]>;
3186 let AddedComplexity = 15 in
3187 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3188 "movq\t{$src, $dst|$dst, $src}",
3189 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3190 XS, Requires<[HasSSE2]>;
3192 let AddedComplexity = 20, isAsmParserOnly = 1 in
3193 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3194 "vmovq\t{$src, $dst|$dst, $src}",
3195 [(set VR128:$dst, (v2i64 (X86vzmovl
3196 (loadv2i64 addr:$src))))]>,
3197 XS, VEX, Requires<[HasAVX]>;
3198 let AddedComplexity = 20 in {
3199 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3200 "movq\t{$src, $dst|$dst, $src}",
3201 [(set VR128:$dst, (v2i64 (X86vzmovl
3202 (loadv2i64 addr:$src))))]>,
3203 XS, Requires<[HasSSE2]>;
3205 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
3206 (MOVZPQILo2PQIrm addr:$src)>;
3209 // Instructions to match in the assembler
3210 let isAsmParserOnly = 1 in {
3211 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3212 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3213 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3214 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3215 // Recognize "movd" with GR64 destination, but encode as a "movq"
3216 def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3217 "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3220 // Instructions for the disassembler
3221 // xr = XMM register / xm = memory operand
3224 let isAsmParserOnly = 1, Predicates = [HasAVX] in
3225 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3226 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3227 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3228 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3230 //===---------------------------------------------------------------------===//
3231 // SSE2 - Misc Instructions
3232 //===---------------------------------------------------------------------===//
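// Informal note: clflush flushes the cache line containing its operand; lfence
// orders loads and mfence orders both loads and stores, and the X86LFence /
// X86MFence patterns below let DAG-level fence nodes select straight to them.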
3235 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3236 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3237 TB, Requires<[HasSSE2]>;
3239 // Load, store, and memory fence
3240 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3241 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3242 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3243 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3244 def : Pat<(X86LFence), (LFENCE)>;
3245 def : Pat<(X86MFence), (MFENCE)>;
3248 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3249 // was introduced with SSE2, it's backward compatible.
3250 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
3252 // Alias instruction that maps an all-ones vector to pcmpeqd, for use by SSE.
3253 // We set canFoldAsLoad because this can be converted to a constant-pool
3254 // load of an all-ones value if folding it would be beneficial.
3255 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3256 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3257 // FIXME: Change encoding to pseudo.
3258 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3259 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
3261 //===---------------------------------------------------------------------===//
3262 // SSE3 - Conversion Instructions
3263 //===---------------------------------------------------------------------===//
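// Informal note: cvtpd2dq converts two packed doubles into two dwords placed
// in the low 64 bits of the destination (upper 64 bits zeroed), and cvtdq2pd
// widens the two low dwords to two doubles; the Y-suffixed forms are the AVX
// 256-bit variants.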
3265 // Convert Packed Double FP to Packed DW Integers
3266 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3267 // The assembler can recognize rr 256-bit instructions by seeing a ymm
3268 // register, but the same isn't true when using memory operands instead.
3269 // Provide other assembly rr and rm forms to address this explicitly.
3270 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3271 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3272 def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3273 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3276 def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3277 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3278 def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3279 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3282 def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3283 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
3284 def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
3285 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
3288 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3289 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3290 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3291 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3293 // Convert Packed DW Integers to Packed Double FP
3294 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3295 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3296 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3297 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3298 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3299 def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
3300 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3301 def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
3302 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3305 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3306 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3307 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3308 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3310 // AVX 256-bit register conversion intrinsics
3311 def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
3312 (VCVTDQ2PDYrr VR128:$src)>;
3313 def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
3314 (VCVTDQ2PDYrm addr:$src)>;
3316 def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
3317 (VCVTPD2DQYrr VR256:$src)>;
3318 def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
3319 (VCVTPD2DQYrm addr:$src)>;
3321 //===---------------------------------------------------------------------===//
3322 // SSE3 - Move Instructions
3323 //===---------------------------------------------------------------------===//
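// Informal note: movshdup replicates the odd-index single-precision elements
// (<s1,s1,s3,s3>), movsldup the even-index ones (<s0,s0,s2,s2>), and movddup
// duplicates the low double; the vector_shuffle patterns further below select
// these forms for the corresponding masks.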
3325 // Replicate Single FP
3326 multiclass sse3_replicate_sfp<bits<8> op, PatFrag rep_frag, string OpcodeStr> {
3327 def rr : S3SI<op, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3328 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3329 [(set VR128:$dst, (v4f32 (rep_frag
3330 VR128:$src, (undef))))]>;
3331 def rm : S3SI<op, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3332 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3333 [(set VR128:$dst, (rep_frag
3334 (memopv4f32 addr:$src), (undef)))]>;
3337 multiclass sse3_replicate_sfp_y<bits<8> op, PatFrag rep_frag,
3339 def rr : S3SI<op, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3340 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3341 def rm : S3SI<op, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3342 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3345 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3346 // FIXME: Merge above classes when we have patterns for the ymm version
3347 defm VMOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "vmovshdup">, VEX;
3348 defm VMOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "vmovsldup">, VEX;
3349 defm VMOVSHDUPY : sse3_replicate_sfp_y<0x16, movshdup, "vmovshdup">, VEX;
3350 defm VMOVSLDUPY : sse3_replicate_sfp_y<0x12, movsldup, "vmovsldup">, VEX;
3352 defm MOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "movshdup">;
3353 defm MOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "movsldup">;
3355 // Replicate Double FP
3356 multiclass sse3_replicate_dfp<string OpcodeStr> {
3357 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3358 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3359 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
3360 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
3361 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3363 (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
3367 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
3368 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3369 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3371 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3372 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3376 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3377 // FIXME: Merge above classes when we have patterns for the ymm version
3378 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
3379 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
3381 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
3383 // Move Unaligned Integer
3384 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3385 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3386 "vlddqu\t{$src, $dst|$dst, $src}",
3387 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
3388 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3389 "vlddqu\t{$src, $dst|$dst, $src}",
3390 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
3392 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3393 "lddqu\t{$src, $dst|$dst, $src}",
3394 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
3396 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
3398 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3400 // Several Move patterns
3401 let AddedComplexity = 5 in {
3402 def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
3403 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3404 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3405 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3406 def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
3407 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3408 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
3409 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3412 // vector_shuffle v1, <undef> <1, 1, 3, 3>
3413 let AddedComplexity = 15 in
3414 def : Pat<(v4i32 (movshdup VR128:$src, (undef))),
3415 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3416 let AddedComplexity = 20 in
3417 def : Pat<(v4i32 (movshdup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3418 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
3420 // vector_shuffle v1, <undef> <0, 0, 2, 2>
3421 let AddedComplexity = 15 in
3422 def : Pat<(v4i32 (movsldup VR128:$src, (undef))),
3423 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3424 let AddedComplexity = 20 in
3425 def : Pat<(v4i32 (movsldup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3426 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
3428 //===---------------------------------------------------------------------===//
3429 // SSE3 - Arithmetic
3430 //===---------------------------------------------------------------------===//
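// Informal note: addsubps/addsubpd subtract in the even lanes and add in the
// odd lanes, e.g. for v4f32 the result is <a0-b0, a1+b1, a2-b2, a3+b3>.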
3432 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
3433 X86MemOperand x86memop, bit Is2Addr = 1> {
3434 def rr : I<0xD0, MRMSrcReg,
3435 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3437 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3438 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3439 [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
3440 def rm : I<0xD0, MRMSrcMem,
3441 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3443 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3444 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3445 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
3448 let isAsmParserOnly = 1, Predicates = [HasAVX],
3449 ExeDomain = SSEPackedDouble in {
3450 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
3451 f128mem, 0>, XD, VEX_4V;
3452 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
3453 f128mem, 0>, OpSize, VEX_4V;
3454 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
3455 f256mem, 0>, XD, VEX_4V;
3456 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
3457 f256mem, 0>, OpSize, VEX_4V;
3459 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
3460 ExeDomain = SSEPackedDouble in {
3461 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
3463 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
3464 f128mem>, TB, OpSize;
3467 //===---------------------------------------------------------------------===//
3468 // SSE3 Instructions
3469 //===---------------------------------------------------------------------===//
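// Informal note: the horizontal ops below pair adjacent elements within each
// source, e.g. haddps yields <a0+a1, a2+a3, b0+b1, b2+b3>; the hsub* forms
// subtract the second element of each pair from the first.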
3472 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3473 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3474 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3476 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3477 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3478 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3480 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3482 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3483 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3484 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3486 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3487 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3488 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3490 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3491 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3492 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3494 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3496 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3497 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3498 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3501 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3502 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
3503 int_x86_sse3_hadd_ps, 0>, VEX_4V;
3504 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
3505 int_x86_sse3_hadd_pd, 0>, VEX_4V;
3506 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
3507 int_x86_sse3_hsub_ps, 0>, VEX_4V;
3508 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
3509 int_x86_sse3_hsub_pd, 0>, VEX_4V;
3510 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
3511 int_x86_avx_hadd_ps_256, 0>, VEX_4V;
3512 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
3513 int_x86_avx_hadd_pd_256, 0>, VEX_4V;
3514 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
3515 int_x86_avx_hsub_ps_256, 0>, VEX_4V;
3516 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
3517 int_x86_avx_hsub_pd_256, 0>, VEX_4V;
3520 let Constraints = "$src1 = $dst" in {
3521 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
3522 int_x86_sse3_hadd_ps>;
3523 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
3524 int_x86_sse3_hadd_pd>;
3525 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
3526 int_x86_sse3_hsub_ps>;
3527 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
3528 int_x86_sse3_hsub_pd>;
3531 //===---------------------------------------------------------------------===//
3532 // SSSE3 - Packed Absolute Instructions
3533 //===---------------------------------------------------------------------===//
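// Informal note: pabsb/pabsw/pabsd write the absolute value of each signed
// source element; the *_mm multiclass below provides the 64-bit MMX-register
// forms of the same operations.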
3535 /// SS3I_unop_rm_int_mm - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
3536 multiclass SS3I_unop_rm_int_mm<bits<8> opc, string OpcodeStr,
3537 PatFrag mem_frag64, Intrinsic IntId64> {
3538 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
3539 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3540 [(set VR64:$dst, (IntId64 VR64:$src))]>;
3542 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
3543 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3545 (IntId64 (bitconvert (mem_frag64 addr:$src))))]>;
3548 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
3549 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
3550 PatFrag mem_frag128, Intrinsic IntId128> {
3551 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3553 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3554 [(set VR128:$dst, (IntId128 VR128:$src))]>,
3557 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3559 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3562 (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
3565 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3566 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
3567 int_x86_ssse3_pabs_b_128>, VEX;
3568 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
3569 int_x86_ssse3_pabs_w_128>, VEX;
3570 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
3571 int_x86_ssse3_pabs_d_128>, VEX;
3574 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
3575 int_x86_ssse3_pabs_b_128>,
3576 SS3I_unop_rm_int_mm<0x1C, "pabsb", memopv8i8,
3577 int_x86_ssse3_pabs_b>;
3579 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
3580 int_x86_ssse3_pabs_w_128>,
3581 SS3I_unop_rm_int_mm<0x1D, "pabsw", memopv4i16,
3582 int_x86_ssse3_pabs_w>;
3584 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
3585 int_x86_ssse3_pabs_d_128>,
3586 SS3I_unop_rm_int_mm<0x1E, "pabsd", memopv2i32,
3587 int_x86_ssse3_pabs_d>;
3589 //===---------------------------------------------------------------------===//
3590 // SSSE3 - Packed Binary Operator Instructions
3591 //===---------------------------------------------------------------------===//
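// Informal note: the less obvious ops here are pshufb, which picks each result
// byte via the low four bits of the corresponding mask byte (or writes zero
// when the mask byte's top bit is set), and pmaddubsw, which multiplies
// unsigned bytes of $src1 by signed bytes of $src2 and adds adjacent products
// into words with signed saturation.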
3593 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
3594 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
3595 PatFrag mem_frag128, Intrinsic IntId128,
3597 let isCommutable = 1 in
3598 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3599 (ins VR128:$src1, VR128:$src2),
3601 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3602 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3603 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3605 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3606 (ins VR128:$src1, i128mem:$src2),
3608 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3609 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3611 (IntId128 VR128:$src1,
3612 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
3614 multiclass SS3I_binop_rm_int_mm<bits<8> opc, string OpcodeStr,
3615 PatFrag mem_frag64, Intrinsic IntId64> {
3616 let isCommutable = 1 in
3617 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
3618 (ins VR64:$src1, VR64:$src2),
3619 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3620 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]>;
3621 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
3622 (ins VR64:$src1, i64mem:$src2),
3623 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3625 (IntId64 VR64:$src1,
3626 (bitconvert (memopv8i8 addr:$src2))))]>;
3629 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3630 let isCommutable = 0 in {
3631 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
3632 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
3633 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
3634 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
3635 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
3636 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
3637 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
3638 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
3639 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
3640 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
3641 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
3642 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
3643 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
3644 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
3645 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
3646 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
3647 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
3648 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
3649 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
3650 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
3651 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
3652 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
3654 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
3655 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
3658 // None of these have i8 immediate fields.
3659 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
3660 let isCommutable = 0 in {
3661 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
3662 int_x86_ssse3_phadd_w_128>,
3663 SS3I_binop_rm_int_mm<0x01, "phaddw", memopv4i16,
3664 int_x86_ssse3_phadd_w>;
3665 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
3666 int_x86_ssse3_phadd_d_128>,
3667 SS3I_binop_rm_int_mm<0x02, "phaddd", memopv2i32,
3668 int_x86_ssse3_phadd_d>;
3669 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
3670 int_x86_ssse3_phadd_sw_128>,
3671 SS3I_binop_rm_int_mm<0x03, "phaddsw", memopv4i16,
3672 int_x86_ssse3_phadd_sw>;
3673 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
3674 int_x86_ssse3_phsub_w_128>,
3675 SS3I_binop_rm_int_mm<0x05, "phsubw", memopv4i16,
3676 int_x86_ssse3_phsub_w>;
3677 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
3678 int_x86_ssse3_phsub_d_128>,
3679 SS3I_binop_rm_int_mm<0x06, "phsubd", memopv2i32,
3680 int_x86_ssse3_phsub_d>;
3681 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
3682 int_x86_ssse3_phsub_sw_128>,
3683 SS3I_binop_rm_int_mm<0x07, "phsubsw", memopv4i16,
3684 int_x86_ssse3_phsub_sw>;
3685 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
3686 int_x86_ssse3_pmadd_ub_sw_128>,
3687 SS3I_binop_rm_int_mm<0x04, "pmaddubsw", memopv8i8,
3688 int_x86_ssse3_pmadd_ub_sw>;
3689 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv8i8,
3690 int_x86_ssse3_pshuf_b_128>,
3691 SS3I_binop_rm_int_mm<0x00, "pshufb", memopv8i8,
3692 int_x86_ssse3_pshuf_b>;
3693 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
3694 int_x86_ssse3_psign_b_128>,
3695 SS3I_binop_rm_int_mm<0x08, "psignb", memopv8i8,
3696 int_x86_ssse3_psign_b>;
3697 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
3698 int_x86_ssse3_psign_w_128>,
3699 SS3I_binop_rm_int_mm<0x09, "psignw", memopv4i16,
3700 int_x86_ssse3_psign_w>;
3701 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
3702 int_x86_ssse3_psign_d_128>,
3703 SS3I_binop_rm_int_mm<0x0A, "psignd", memopv2i32,
3704 int_x86_ssse3_psign_d>;
3706 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
3707 int_x86_ssse3_pmul_hr_sw_128>,
3708 SS3I_binop_rm_int_mm<0x0B, "pmulhrsw", memopv4i16,
3709 int_x86_ssse3_pmul_hr_sw>;
3712 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
3713 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
3714 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
3715 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
3717 //===---------------------------------------------------------------------===//
3718 // SSSE3 - Packed Align Instruction Patterns
3719 //===---------------------------------------------------------------------===//
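// Informal note: palignr concatenates $src1 (high) with $src2 (low) and
// extracts a register-width window starting $src3 bytes from the least
// significant end; that is why the patterns below swap the shuffle operands
// and compute the byte offset with SHUFFLE_get_palign_imm.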
3721 multiclass ssse3_palign_mm<string asm, Intrinsic IntId> {
3722 def R64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
3723 (ins VR64:$src1, VR64:$src2, i8imm:$src3),
3724 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
3725 def R64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
3726 (ins VR64:$src1, i64mem:$src2, i8imm:$src3),
3727 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
3728 def R64irr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
3729 (ins VR64:$src1, VR64:$src2, i8imm:$src3),
3730 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3731 [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2, (i8 imm:$src3)))]>;
3732 def R64irm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
3733 (ins VR64:$src1, i64mem:$src2, i8imm:$src3),
3734 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3735 [(set VR64:$dst, (IntId VR64:$src1,
3736 (bitconvert (load_mmx addr:$src2)), (i8 imm:$src3)))]>;
3739 multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
3740 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
3741 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
3743 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3745 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3747 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
3748 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
3750 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3752 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3756 let isAsmParserOnly = 1, Predicates = [HasAVX] in
3757 defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
3758 let Constraints = "$src1 = $dst" in
3759 defm PALIGN : ssse3_palign<"palignr">,
3760 ssse3_palign_mm<"palignr", int_x86_mmx_palignr_b>;
3762 let AddedComplexity = 5 in {
3764 def : Pat<(v1i64 (palign:$src3 VR64:$src1, VR64:$src2)),
3765 (PALIGNR64rr VR64:$src2, VR64:$src1,
3766 (SHUFFLE_get_palign_imm VR64:$src3))>,
3767 Requires<[HasSSSE3]>;
3768 def : Pat<(v2i32 (palign:$src3 VR64:$src1, VR64:$src2)),
3769 (PALIGNR64rr VR64:$src2, VR64:$src1,
3770 (SHUFFLE_get_palign_imm VR64:$src3))>,
3771 Requires<[HasSSSE3]>;
3772 def : Pat<(v4i16 (palign:$src3 VR64:$src1, VR64:$src2)),
3773 (PALIGNR64rr VR64:$src2, VR64:$src1,
3774 (SHUFFLE_get_palign_imm VR64:$src3))>,
3775 Requires<[HasSSSE3]>;
3776 def : Pat<(v8i8 (palign:$src3 VR64:$src1, VR64:$src2)),
3777 (PALIGNR64rr VR64:$src2, VR64:$src1,
3778 (SHUFFLE_get_palign_imm VR64:$src3))>,
3779 Requires<[HasSSSE3]>;
3781 def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
3782 (PALIGNR128rr VR128:$src2, VR128:$src1,
3783 (SHUFFLE_get_palign_imm VR128:$src3))>,
3784 Requires<[HasSSSE3]>;
3785 def : Pat<(v4f32 (palign:$src3 VR128:$src1, VR128:$src2)),
3786 (PALIGNR128rr VR128:$src2, VR128:$src1,
3787 (SHUFFLE_get_palign_imm VR128:$src3))>,
3788 Requires<[HasSSSE3]>;
3789 def : Pat<(v8i16 (palign:$src3 VR128:$src1, VR128:$src2)),
3790 (PALIGNR128rr VR128:$src2, VR128:$src1,
3791 (SHUFFLE_get_palign_imm VR128:$src3))>,
3792 Requires<[HasSSSE3]>;
3793 def : Pat<(v16i8 (palign:$src3 VR128:$src1, VR128:$src2)),
3794 (PALIGNR128rr VR128:$src2, VR128:$src1,
3795 (SHUFFLE_get_palign_imm VR128:$src3))>,
3796 Requires<[HasSSSE3]>;
3799 //===---------------------------------------------------------------------===//
3800 // SSSE3 Misc Instructions
3801 //===---------------------------------------------------------------------===//
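// Informal note: monitor arms address monitoring on the address in EAX (with
// extensions and hints in ECX and EDX), and mwait, taking extensions in ECX
// and hints in EAX, waits for a write to the monitored range; the operand
// order of the intrinsic patterns below mirrors that.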
3803 // Thread synchronization
3804 def MONITOR : I<0x01, MRM_C8, (outs), (ins), "monitor",
3805 [(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
3806 def MWAIT : I<0x01, MRM_C9, (outs), (ins), "mwait",
3807 [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
3809 //===---------------------------------------------------------------------===//
3810 // Non-Instruction Patterns
3811 //===---------------------------------------------------------------------===//
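// Informal note: the bitconvert patterns below fold bitcasts between 128-bit
// vector types into plain register reuse (no instruction is emitted), since
// all of these types live in the XMM register file.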
3813 // extload f32 -> f64. This matches load+fextend because we have a hack in
3814 // the isel (PreprocessForFPConvert) that can introduce loads after dag combine.
3816 // Since these loads aren't folded into the fextend, we have to match them explicitly here.
3818 let Predicates = [HasSSE2] in
3819 def : Pat<(fextend (loadf32 addr:$src)),
3820 (CVTSS2SDrm addr:$src)>;
3823 let Predicates = [HasSSE2] in {
3824 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
3825 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
3826 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
3827 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
3828 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
3829 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
3830 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
3831 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
3832 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
3833 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
3834 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
3835 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
3836 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
3837 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
3838 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
3839 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
3840 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
3841 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
3842 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
3843 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
3844 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
3845 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
3846 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
3847 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
3848 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
3849 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
3850 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
3851 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
3852 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
3853 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
3856 // Move scalar to XMM zero-extended
3857 // movd to an XMM register zero-extends the upper elements
3858 let AddedComplexity = 15 in {
3859 // Zero a VR128, then do a MOVS{S|D} to the lower bits.
3860 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
3861 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
3862 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
3863 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
3864 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
3865 (MOVSSrr (v4f32 (V_SET0PS)),
3866 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
3867 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
3868 (MOVSSrr (v4i32 (V_SET0PI)),
3869 (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
3872 // Splat v2f64 / v2i64
3873 let AddedComplexity = 10 in {
3874 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
3875 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3876 def : Pat<(unpckh (v2f64 VR128:$src), (undef)),
3877 (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3878 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
3879 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3880 def : Pat<(unpckh (v2i64 VR128:$src), (undef)),
3881 (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3884 // Special unary SHUFPSrri case.
3885 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
3886 (SHUFPSrri VR128:$src1, VR128:$src1,
3887 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3888 let AddedComplexity = 5 in
3889 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3890 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3891 Requires<[HasSSE2]>;
3892 // Special unary SHUFPDrri case.
3893 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
3894 (SHUFPDrri VR128:$src1, VR128:$src1,
3895 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3896 Requires<[HasSSE2]>;
3897 // Special unary SHUFPDrri case.
3898 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
3899 (SHUFPDrri VR128:$src1, VR128:$src1,
3900 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3901 Requires<[HasSSE2]>;
3902 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
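// PSHUFD can take its operand from memory, so shuffling float data this way
// folds the load; the shuffle runs in the integer domain, which may cost a
// domain-crossing penalty on some CPUs but avoids a separate load instruction.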
3903 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3904 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3905 Requires<[HasSSE2]>;
3907 // Special binary v4i32 shuffle cases with SHUFPS.
3908 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
3909 (SHUFPSrri VR128:$src1, VR128:$src2,
3910 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3911 Requires<[HasSSE2]>;
3912 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)))),
3913 (SHUFPSrmi VR128:$src1, addr:$src2,
3914 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3915 Requires<[HasSSE2]>;
3916 // Special binary v2i64 shuffle cases using SHUFPDrri.
3917 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
3918 (SHUFPDrri VR128:$src1, VR128:$src2,
3919 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3920 Requires<[HasSSE2]>;
3922 // vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
3923 let AddedComplexity = 15 in {
3924 def : Pat<(v4i32 (unpckl_undef:$src2 VR128:$src, (undef))),
3925 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3926 Requires<[OptForSpeed, HasSSE2]>;
3927 def : Pat<(v4f32 (unpckl_undef:$src2 VR128:$src, (undef))),
3928 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3929 Requires<[OptForSpeed, HasSSE2]>;
3931 let AddedComplexity = 10 in {
3932 def : Pat<(v4f32 (unpckl_undef VR128:$src, (undef))),
3933 (UNPCKLPSrr VR128:$src, VR128:$src)>;
3934 def : Pat<(v16i8 (unpckl_undef VR128:$src, (undef))),
3935 (PUNPCKLBWrr VR128:$src, VR128:$src)>;
3936 def : Pat<(v8i16 (unpckl_undef VR128:$src, (undef))),
3937 (PUNPCKLWDrr VR128:$src, VR128:$src)>;
3938 def : Pat<(v4i32 (unpckl_undef VR128:$src, (undef))),
3939 (PUNPCKLDQrr VR128:$src, VR128:$src)>;
3942 // vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
3943 let AddedComplexity = 15 in {
3944 def : Pat<(v4i32 (unpckh_undef:$src2 VR128:$src, (undef))),
3945 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3946 Requires<[OptForSpeed, HasSSE2]>;
3947 def : Pat<(v4f32 (unpckh_undef:$src2 VR128:$src, (undef))),
3948 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3949 Requires<[OptForSpeed, HasSSE2]>;
3951 let AddedComplexity = 10 in {
3952 def : Pat<(v4f32 (unpckh_undef VR128:$src, (undef))),
3953 (UNPCKHPSrr VR128:$src, VR128:$src)>;
3954 def : Pat<(v16i8 (unpckh_undef VR128:$src, (undef))),
3955 (PUNPCKHBWrr VR128:$src, VR128:$src)>;
3956 def : Pat<(v8i16 (unpckh_undef VR128:$src, (undef))),
3957 (PUNPCKHWDrr VR128:$src, VR128:$src)>;
3958 def : Pat<(v4i32 (unpckh_undef VR128:$src, (undef))),
3959 (PUNPCKHDQrr VR128:$src, VR128:$src)>;
3962 let AddedComplexity = 20 in {
3963 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
3964 def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
3965 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
3967 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
3968 def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
3969 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
3971 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
3972 def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
3973 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3974 def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
3975 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3978 let AddedComplexity = 20 in {
3979 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
3980 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
3981 (MOVLPSrm VR128:$src1, addr:$src2)>;
3982 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
3983 (MOVLPDrm VR128:$src1, addr:$src2)>;
3984 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
3985 (MOVLPSrm VR128:$src1, addr:$src2)>;
3986 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
3987 (MOVLPDrm VR128:$src1, addr:$src2)>;
3990 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
3991 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3992 (MOVLPSmr addr:$src1, VR128:$src2)>;
3993 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3994 (MOVLPDmr addr:$src1, VR128:$src2)>;
3995 def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
3996 addr:$src1),
3997 (MOVLPSmr addr:$src1, VR128:$src2)>;
3998 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3999 (MOVLPDmr addr:$src1, VR128:$src2)>;
4001 let AddedComplexity = 15 in {
4002 // Setting the lowest element in the vector.
4003 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
4004 (MOVSSrr (v4i32 VR128:$src1),
4005 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
4006 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
4007 (MOVSDrr (v2i64 VR128:$src1),
4008 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
4010 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
4011 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
4012 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
4013 Requires<[HasSSE2]>;
4014 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
4015 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
4016 Requires<[HasSSE2]>;
4019 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
4020 // fall back to this for SSE1)
4021 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
4022 (SHUFPSrri VR128:$src2, VR128:$src1,
4023 (SHUFFLE_get_shuf_imm VR128:$src3))>;
4025 // Set lowest element and zero upper elements.
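// MOVQ (MOVZPQILo2PQI) copies the low quadword and zeroes the upper one,
// which is exactly the X86vzmovl semantics for a v2f64 value.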
4026 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
4027 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
4029 // Some special case pandn patterns.
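// PANDN computes (~src1) & src2, so an AND whose first operand is an all-ones
// XOR (i.e. a NOT) maps directly onto it; the bitconverted immAllOnesV forms
// cover the element widths the NOT may have been built with.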
4030 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
4031 VR128:$src2)),
4032 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
4033 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
4034 VR128:$src2)),
4035 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
4036 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
4037 VR128:$src2)),
4038 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
4040 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
4041 (memop addr:$src2))),
4042 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
4043 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
4044 (memop addr:$src2))),
4045 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
4046 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
4047 (memop addr:$src2))),
4048 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
4050 // vector -> vector casts
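// Whole-vector conversions: sint_to_fp maps to CVTDQ2PS, fp_to_sint to the
// truncating CVTTPS2DQ form (truncation toward zero matches C float-to-int
// semantics); the v2i32 cases go through the MMX-operand CVTPI2PD/CVTTPD2PI
// instructions.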
4051 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
4052 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
4053 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
4054 (Int_CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
4055 def : Pat<(v2f64 (sint_to_fp (v2i32 VR64:$src))),
4056 (Int_CVTPI2PDrr VR64:$src)>, Requires<[HasSSE2]>;
4057 def : Pat<(v2i32 (fp_to_sint (v2f64 VR128:$src))),
4058 (Int_CVTTPD2PIrr VR128:$src)>, Requires<[HasSSE2]>;
4060 // Use movaps / movups for SSE integer load / store (one byte shorter).
4061 let Predicates = [HasSSE1] in {
4062 def : Pat<(alignedloadv4i32 addr:$src),
4063 (MOVAPSrm addr:$src)>;
4064 def : Pat<(loadv4i32 addr:$src),
4065 (MOVUPSrm addr:$src)>;
4066 def : Pat<(alignedloadv2i64 addr:$src),
4067 (MOVAPSrm addr:$src)>;
4068 def : Pat<(loadv2i64 addr:$src),
4069 (MOVUPSrm addr:$src)>;
4071 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
4072 (MOVAPSmr addr:$dst, VR128:$src)>;
4073 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
4074 (MOVAPSmr addr:$dst, VR128:$src)>;
4075 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
4076 (MOVAPSmr addr:$dst, VR128:$src)>;
4077 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
4078 (MOVAPSmr addr:$dst, VR128:$src)>;
4079 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
4080 (MOVUPSmr addr:$dst, VR128:$src)>;
4081 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
4082 (MOVUPSmr addr:$dst, VR128:$src)>;
4083 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
4084 (MOVUPSmr addr:$dst, VR128:$src)>;
4085 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
4086 (MOVUPSmr addr:$dst, VR128:$src)>;
4089 // Use vmovaps/vmovups for AVX 128-bit integer load/store (one byte shorter).
4090 let Predicates = [HasAVX] in {
4091 def : Pat<(alignedloadv4i32 addr:$src),
4092 (VMOVAPSrm addr:$src)>;
4093 def : Pat<(loadv4i32 addr:$src),
4094 (VMOVUPSrm addr:$src)>;
4095 def : Pat<(alignedloadv2i64 addr:$src),
4096 (VMOVAPSrm addr:$src)>;
4097 def : Pat<(loadv2i64 addr:$src),
4098 (VMOVUPSrm addr:$src)>;
4100 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
4101 (VMOVAPSmr addr:$dst, VR128:$src)>;
4102 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
4103 (VMOVAPSmr addr:$dst, VR128:$src)>;
4104 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
4105 (VMOVAPSmr addr:$dst, VR128:$src)>;
4106 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
4107 (VMOVAPSmr addr:$dst, VR128:$src)>;
4108 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
4109 (VMOVUPSmr addr:$dst, VR128:$src)>;
4110 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
4111 (VMOVUPSmr addr:$dst, VR128:$src)>;
4112 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
4113 (VMOVUPSmr addr:$dst, VR128:$src)>;
4114 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
4115 (VMOVUPSmr addr:$dst, VR128:$src)>;
4118 //===----------------------------------------------------------------------===//
4119 // SSE4.1 - Packed Move with Sign/Zero Extend
4120 //===----------------------------------------------------------------------===//
4122 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4123 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4124 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4125 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4127 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4128 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4130 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
4134 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4135 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
4137 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
4139 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
4141 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
4143 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
4145 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
4149 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
4150 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
4151 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
4152 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
4153 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
4154 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
4156 // Common patterns involving scalar load.
4157 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
4158 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4159 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
4160 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4162 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
4163 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4164 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
4165 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4167 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
4168 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4169 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
4170 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4172 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
4173 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4174 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
4175 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4177 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
4178 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4179 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
4180 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4182 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
4183 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4184 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
4185 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
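// The vzmovl_v2i64 / vzload_v2i64 fragments describe a scalar load that is
// zero-extended into the low quadword of an XMM register; folding them lets
// the sign/zero-extending moves read their source bytes directly from memory.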
4188 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4189 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4190 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4191 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4193 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4194 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4196 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
4200 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4201 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
4203 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
4205 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
4207 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
4211 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
4212 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
4213 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
4214 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
4216 // Common patterns involving scalar load
4217 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
4218 (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
4219 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
4220 (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
4222 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
4223 (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
4224 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
4225 (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
4228 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4229 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4230 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4231 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4233 // Expecting an i16 load any-extended to an i32 value.
4234 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
4235 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4236 [(set VR128:$dst, (IntId (bitconvert
4237 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
4241 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4242 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
4244 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
4247 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
4248 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
4250 // Common patterns involving scalar load
4251 def : Pat<(int_x86_sse41_pmovsxbq
4252 (bitconvert (v4i32 (X86vzmovl
4253 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4254 (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
4256 def : Pat<(int_x86_sse41_pmovzxbq
4257 (bitconvert (v4i32 (X86vzmovl
4258 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4259 (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
4261 //===----------------------------------------------------------------------===//
4262 // SSE4.1 - Extract Instructions
4263 //===----------------------------------------------------------------------===//
4265 /// SS41I_extract8 - SSE 4.1 extract 8 bits to a 32-bit reg or 8-bit mem
4266 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
4267 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4268 (ins VR128:$src1, i32i8imm:$src2),
4269 !strconcat(OpcodeStr,
4270 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4271 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
4273 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4274 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
4275 !strconcat(OpcodeStr,
4276 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4279 // There's an AssertZext in the way of writing the store pattern
4280 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4283 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4284 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
4285 def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
4286 (ins VR128:$src1, i32i8imm:$src2),
4287 "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
4290 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
4293 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
4294 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
4295 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4296 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
4297 !strconcat(OpcodeStr,
4298 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4301 // There's an AssertZext in the way of writing the store pattern
4302 // (store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))), addr:$dst)
4305 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4306 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
4308 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
4311 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4312 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
4313 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4314 (ins VR128:$src1, i32i8imm:$src2),
4315 !strconcat(OpcodeStr,
4316 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4318 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
4319 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4320 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
4321 !strconcat(OpcodeStr,
4322 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4323 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
4324 addr:$dst)]>, OpSize;
4327 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4328 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
4330 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
4332 /// SS41I_extract64 - SSE 4.1 extract 64 bits to a 64-bit int reg or memory destination
4333 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
4334 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
4335 (ins VR128:$src1, i32i8imm:$src2),
4336 !strconcat(OpcodeStr,
4337 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4339 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
4340 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4341 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
4342 !strconcat(OpcodeStr,
4343 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4344 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
4345 addr:$dst)]>, OpSize, REX_W;
4348 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4349 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
4351 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
4353 /// SS41I_extractf32 - SSE 4.1 extract a 32-bit fp value to an int reg or
4354 /// memory destination
4355 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
4356 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4357 (ins VR128:$src1, i32i8imm:$src2),
4358 !strconcat(OpcodeStr,
4359 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4361 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
4363 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4364 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
4365 !strconcat(OpcodeStr,
4366 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4367 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
4368 addr:$dst)]>, OpSize;
4371 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4372 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
4373 def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
4374 (ins VR128:$src1, i32i8imm:$src2),
4375 "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
4378 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
4380 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
4381 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
4384 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
4385 Requires<[HasSSE41]>;
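// The f32 form arises when the extracted lane is stored through a float
// pointer: the DAG bitconverts the i32 element back to f32, but it is still
// the same 32-bit store, so EXTRACTPSmr handles it.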
4387 //===----------------------------------------------------------------------===//
4388 // SSE4.1 - Insert Instructions
4389 //===----------------------------------------------------------------------===//
4391 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
4392 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4393 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4395 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4397 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4399 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
4400 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4401 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
4403 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4405 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4407 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
4408 imm:$src3))]>, OpSize;
4411 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4412 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
4413 let Constraints = "$src1 = $dst" in
4414 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
4416 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
4417 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4418 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4420 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4422 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4424 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
4426 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4427 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
4429 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4431 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4433 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
4434 imm:$src3)))]>, OpSize;
4437 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4438 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
4439 let Constraints = "$src1 = $dst" in
4440 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
4442 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
4443 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4444 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4446 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4448 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4450 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
4452 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4453 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
4455 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4457 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4459 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
4460 imm:$src3)))]>, OpSize;
4463 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4464 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
4465 let Constraints = "$src1 = $dst" in
4466 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
4468 // insertps has a few different modes. The first two below are optimized
4469 // inserts that won't zero arbitrary elements in the destination vector; the
4470 // one matching the intrinsic can zero arbitrary elements in the target
4471 // vector.
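// For reference, the insertps immediate encodes: bits [7:6] select the source
// element, bits [5:4] select the destination element, and bits [3:0] form a
// zero mask applied to the result.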
4472 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
4473 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4474 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4476 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4478 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4480 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
4482 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4483 (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
4485 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4487 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4489 (X86insrtps VR128:$src1,
4490 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
4491 imm:$src3))]>, OpSize;
4494 let Constraints = "$src1 = $dst" in
4495 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
4496 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4497 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
4499 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4500 (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4502 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4503 (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4504 Requires<[HasSSE41]>;
4506 //===----------------------------------------------------------------------===//
4507 // SSE4.1 - Round Instructions
4508 //===----------------------------------------------------------------------===//
4510 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
4511 X86MemOperand x86memop, RegisterClass RC,
4512 PatFrag mem_frag32, PatFrag mem_frag64,
4513 Intrinsic V4F32Int, Intrinsic V2F64Int> {
4515 // Vector intrinsic operation, reg
4516 def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
4517 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4518 !strconcat(OpcodeStr,
4519 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4520 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
4523 // Vector intrinsic operation, mem
4524 def PSm_Int : Ii8<opcps, MRMSrcMem,
4525 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4526 !strconcat(OpcodeStr,
4527 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4529 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
4531 Requires<[HasSSE41]>;
4533 // Vector intrinsic operation, reg
4534 def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
4535 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4536 !strconcat(OpcodeStr,
4537 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4538 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
4541 // Vector intrinsic operation, mem
4542 def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
4543 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4544 !strconcat(OpcodeStr,
4545 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4547 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
4551 multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
4552 RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
4554 // Vector intrinsic operation, reg
4555 def PSr : SS4AIi8<opcps, MRMSrcReg,
4556 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4557 !strconcat(OpcodeStr,
4558 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4561 // Vector intrinsic operation, mem
4562 def PSm : Ii8<opcps, MRMSrcMem,
4563 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4564 !strconcat(OpcodeStr,
4565 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4566 []>, TA, OpSize, Requires<[HasSSE41]>;
4568 // Vector intrinsic operation, reg
4569 def PDr : SS4AIi8<opcpd, MRMSrcReg,
4570 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4571 !strconcat(OpcodeStr,
4572 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4575 // Vector intrinsic operation, mem
4576 def PDm : SS4AIi8<opcpd, MRMSrcMem,
4577 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4578 !strconcat(OpcodeStr,
4579 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4583 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4586 Intrinsic F64Int, bit Is2Addr = 1> {
4587 // Intrinsic operation, reg.
4588 def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
4589 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4591 !strconcat(OpcodeStr,
4592 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4593 !strconcat(OpcodeStr,
4594 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4595 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4598 // Intrinsic operation, mem.
4599 def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
4600 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4602 !strconcat(OpcodeStr,
4603 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4604 !strconcat(OpcodeStr,
4605 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4607 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
4610 // Intrinsic operation, reg.
4611 def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
4612 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4614 !strconcat(OpcodeStr,
4615 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4616 !strconcat(OpcodeStr,
4617 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4618 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4621 // Intrinsic operation, mem.
4622 def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
4623 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4625 !strconcat(OpcodeStr,
4626 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4627 !strconcat(OpcodeStr,
4628 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4630 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
4634 multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
4636 // Intrinsic operation, reg.
4637 def SSr : SS4AIi8<opcss, MRMSrcReg,
4638 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4639 !strconcat(OpcodeStr,
4640 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4643 // Intrinsic operation, mem.
4644 def SSm : SS4AIi8<opcss, MRMSrcMem,
4645 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4646 !strconcat(OpcodeStr,
4647 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4650 // Intrinsic operation, reg.
4651 def SDr : SS4AIi8<opcsd, MRMSrcReg,
4652 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4653 !strconcat(OpcodeStr,
4654 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4657 // Intrinsic operation, mem.
4658 def SDm : SS4AIi8<opcsd, MRMSrcMem,
4659 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4660 !strconcat(OpcodeStr,
4661 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4665 // FP round - roundss, roundps, roundsd, roundpd
4666 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4668 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
4669 memopv4f32, memopv2f64,
4670 int_x86_sse41_round_ps,
4671 int_x86_sse41_round_pd>, VEX;
4672 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
4673 memopv8f32, memopv4f64,
4674 int_x86_avx_round_ps_256,
4675 int_x86_avx_round_pd_256>, VEX;
4676 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
4677 int_x86_sse41_round_ss,
4678 int_x86_sse41_round_sd, 0>, VEX_4V;
4680 // Instructions for the assembler
4681 defm VROUND : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
4683 defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
4685 defm VROUND : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
4688 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
4689 memopv4f32, memopv2f64,
4690 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
4691 let Constraints = "$src1 = $dst" in
4692 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
4693 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
4695 //===----------------------------------------------------------------------===//
4696 // SSE4.1 - Packed Bit Test
4697 //===----------------------------------------------------------------------===//
4699 // The PTEST instruction. We lower to this in X86ISelLowering, primarily from
4700 // the Intel intrinsic that corresponds to it.
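// PTEST writes no register result: it sets ZF when (src1 & src2) is all zeros
// and CF when (~src1 & src2) is all zeros, so the defs below only clobber
// EFLAGS.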
4701 let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in {
4702 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4703 "vptest\t{$src2, $src1|$src1, $src2}",
4704 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4706 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4707 "vptest\t{$src2, $src1|$src1, $src2}",
4708 [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4711 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
4712 "vptest\t{$src2, $src1|$src1, $src2}",
4713 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
4715 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
4716 "vptest\t{$src2, $src1|$src1, $src2}",
4717 [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
4721 let Defs = [EFLAGS] in {
4722 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4723 "ptest \t{$src2, $src1|$src1, $src2}",
4724 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4726 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4727 "ptest \t{$src2, $src1|$src1, $src2}",
4728 [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4732 // The bit test instructions below are AVX only
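// VTESTPS/VTESTPD behave like PTEST but look only at the sign bit of each
// packed element when setting ZF and CF.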
4733 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
4734 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
4735 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
4736 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4737 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
4738 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
4739 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4740 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
4744 let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in {
4745 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
4746 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
4747 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
4748 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
4751 //===----------------------------------------------------------------------===//
4752 // SSE4.1 - Misc Instructions
4753 //===----------------------------------------------------------------------===//
4755 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
4756 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
4757 Intrinsic IntId128> {
4758 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4760 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4761 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
4762 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4764 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4767 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
4770 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4771 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
4772 int_x86_sse41_phminposuw>, VEX;
4773 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
4774 int_x86_sse41_phminposuw>;
4776 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
4777 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
4778 Intrinsic IntId128, bit Is2Addr = 1> {
4779 let isCommutable = 1 in
4780 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4781 (ins VR128:$src1, VR128:$src2),
4783 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4784 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4785 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
4786 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4787 (ins VR128:$src1, i128mem:$src2),
4789 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4790 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4792 (IntId128 VR128:$src1,
4793 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4796 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4797 let isCommutable = 0 in
4798 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
4800 defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
4802 defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
4804 defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
4806 defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
4808 defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
4810 defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
4812 defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
4814 defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
4816 defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
4818 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
4822 let Constraints = "$src1 = $dst" in {
4823 let isCommutable = 0 in
4824 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
4825 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
4826 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
4827 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
4828 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
4829 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
4830 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
4831 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
4832 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
4833 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
4834 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
4837 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
4838 (PCMPEQQrr VR128:$src1, VR128:$src2)>;
4839 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
4840 (PCMPEQQrm VR128:$src1, addr:$src2)>;
4842 /// SS48I_binop_rm - Simple SSE41 binary operator.
4843 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4844 ValueType OpVT, bit Is2Addr = 1> {
4845 let isCommutable = 1 in
4846 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4847 (ins VR128:$src1, VR128:$src2),
4849 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4850 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4851 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
4853 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4854 (ins VR128:$src1, i128mem:$src2),
4856 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4857 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4858 [(set VR128:$dst, (OpNode VR128:$src1,
4859 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
4863 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4864 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
4865 let Constraints = "$src1 = $dst" in
4866 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
4868 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
4869 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
4870 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
4871 X86MemOperand x86memop, bit Is2Addr = 1> {
4872 let isCommutable = 1 in
4873 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
4874 (ins RC:$src1, RC:$src2, i32i8imm:$src3),
4876 !strconcat(OpcodeStr,
4877 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4878 !strconcat(OpcodeStr,
4879 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4880 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
4882 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
4883 (ins RC:$src1, x86memop:$src2, i32i8imm:$src3),
4885 !strconcat(OpcodeStr,
4886 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4887 !strconcat(OpcodeStr,
4888 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4891 (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
4895 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4896 let isCommutable = 0 in {
4897 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
4898 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4899 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
4900 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4901 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
4902 int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4903 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
4904 int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4905 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
4906 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4907 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
4908 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4910 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
4911 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4912 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
4913 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4914 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
4915 VR256, memopv32i8, i256mem, 0>, VEX_4V;
4918 let Constraints = "$src1 = $dst" in {
4919 let isCommutable = 0 in {
4920 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
4921 VR128, memopv16i8, i128mem>;
4922 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
4923 VR128, memopv16i8, i128mem>;
4924 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
4925 VR128, memopv16i8, i128mem>;
4926 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
4927 VR128, memopv16i8, i128mem>;
4929 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
4930 VR128, memopv16i8, i128mem>;
4931 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
4932 VR128, memopv16i8, i128mem>;
4935 /// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
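// The fourth (mask) register operand of these blends is encoded in the upper
// four bits of an immediate byte, hence VEX_I8IMM on the defs below.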
4936 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4937 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
4938 RegisterClass RC, X86MemOperand x86memop,
4939 PatFrag mem_frag, Intrinsic IntId> {
4940 def rr : I<opc, MRMSrcReg, (outs RC:$dst),
4941 (ins RC:$src1, RC:$src2, RC:$src3),
4942 !strconcat(OpcodeStr,
4943 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4944 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
4945 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4947 def rm : I<opc, MRMSrcMem, (outs RC:$dst),
4948 (ins RC:$src1, x86memop:$src2, RC:$src3),
4949 !strconcat(OpcodeStr,
4950 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4952 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
4954 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4958 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
4959 memopv16i8, int_x86_sse41_blendvpd>;
4960 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
4961 memopv16i8, int_x86_sse41_blendvps>;
4962 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
4963 memopv16i8, int_x86_sse41_pblendvb>;
4964 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
4965 memopv32i8, int_x86_avx_blendv_pd_256>;
4966 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
4967 memopv32i8, int_x86_avx_blendv_ps_256>;
4969 /// SS41I_ternary_int - SSE 4.1 ternary operator
4970 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
4971 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4972 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4973 (ins VR128:$src1, VR128:$src2),
4974 !strconcat(OpcodeStr,
4975 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4976 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
4979 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4980 (ins VR128:$src1, i128mem:$src2),
4981 !strconcat(OpcodeStr,
4982 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4985 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
4989 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
4990 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
4991 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
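// MOVNTDQA is a non-temporal (streaming) 16-byte aligned load, primarily
// useful on write-combining memory; it is only reachable through the
// intrinsic, so there is no generic load pattern for it.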
4993 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4994 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4995 "vmovntdqa\t{$src, $dst|$dst, $src}",
4996 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4998 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4999 "movntdqa\t{$src, $dst|$dst, $src}",
5000 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
5003 //===----------------------------------------------------------------------===//
5004 // SSE4.2 - Compare Instructions
5005 //===----------------------------------------------------------------------===//
5007 /// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
5008 multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
5009 Intrinsic IntId128, bit Is2Addr = 1> {
5010 def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
5011 (ins VR128:$src1, VR128:$src2),
5013 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5014 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5015 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5017 def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
5018 (ins VR128:$src1, i128mem:$src2),
5020 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5021 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5023 (IntId128 VR128:$src1,
5024 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5027 let isAsmParserOnly = 1, Predicates = [HasAVX] in
5028 defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
5030 let Constraints = "$src1 = $dst" in
5031 defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
5033 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
5034 (PCMPGTQrr VR128:$src1, VR128:$src2)>;
5035 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
5036 (PCMPGTQrm VR128:$src1, addr:$src2)>;
5038 //===----------------------------------------------------------------------===//
5039 // SSE4.2 - String/text Processing Instructions
5040 //===----------------------------------------------------------------------===//
5042 // Packed Compare Implicit Length Strings, Return Mask
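// The implicit-length forms treat their operands as NUL-terminated strings,
// while the explicit-length forms take the operand lengths in EAX/EDX. The
// mask variants produce their result in XMM0 and the index variants in ECX,
// hence the pseudo instructions with custom insertion below.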
5043 multiclass pseudo_pcmpistrm<string asm> {
5044 def REG : Ii8<0, Pseudo, (outs VR128:$dst),
5045 (ins VR128:$src1, VR128:$src2, i8imm:$src3), !strconcat(asm, "rr PSEUDO"),
5046 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
5048 def MEM : Ii8<0, Pseudo, (outs VR128:$dst),
5049 (ins VR128:$src1, i128mem:$src2, i8imm:$src3), !strconcat(asm, "rm PSEUDO"),
5050 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
5051 VR128:$src1, (load addr:$src2), imm:$src3))]>;
5054 let Defs = [EFLAGS], usesCustomInserter = 1 in {
5055 defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
5056 defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
5059 let Defs = [XMM0, EFLAGS], isAsmParserOnly = 1,
5060 Predicates = [HasAVX] in {
5061 def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
5062 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5063 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
5064 def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
5065 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5066 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
5069 let Defs = [XMM0, EFLAGS] in {
5070 def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
5071 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5072 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
5073 def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
5074 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5075 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
5078 // Packed Compare Explicit Length Strings, Return Mask
5079 multiclass pseudo_pcmpestrm<string asm> {
5080 def REG : Ii8<0, Pseudo, (outs VR128:$dst),
5081 (ins VR128:$src1, VR128:$src3, i8imm:$src5), !strconcat(asm, "rr PSEUDO"),
5082 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
5083 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
5084 def MEM : Ii8<0, Pseudo, (outs VR128:$dst),
5085 (ins VR128:$src1, i128mem:$src3, i8imm:$src5), !strconcat(asm, "rm PSEUDO"),
5086 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
5087 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
5090 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
5091 defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
5092 defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
5095 let isAsmParserOnly = 1, Predicates = [HasAVX],
5096 Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
5097 def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
5098 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5099 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
5100 def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
5101 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5102 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
5105 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
5106 def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
5107 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5108 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
5109 def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
5110 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5111 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
5114 // Packed Compare Implicit Length Strings, Return Index
5115 let Defs = [ECX, EFLAGS] in {
5116 multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
5117 def rr : SS42AI<0x63, MRMSrcReg, (outs),
5118 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5119 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
5120 [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
5121 (implicit EFLAGS)]>, OpSize;
5122 def rm : SS42AI<0x63, MRMSrcMem, (outs),
5123 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5124 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
5125 [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
5126 (implicit EFLAGS)]>, OpSize;
5130 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
5131 defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
5133 defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
5135 defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
5137 defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
5139 defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
5141 defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
5145 defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
5146 defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
5147 defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
5148 defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
5149 defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
5150 defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
5152 // Packed Compare Explicit Length Strings, Return Index
5153 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
5154 multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
5155 def rr : SS42AI<0x61, MRMSrcReg, (outs),
5156 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5157 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5158 [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
5159 (implicit EFLAGS)]>, OpSize;
5160 def rm : SS42AI<0x61, MRMSrcMem, (outs),
5161 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5162 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5164 (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
5165 (implicit EFLAGS)]>, OpSize;
5169 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
5170 defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
5172 defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
5174 defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
5176 defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
5178 defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
5180 defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
5184 defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
5185 defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
5186 defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
5187 defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
5188 defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
5189 defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
5191 //===----------------------------------------------------------------------===//
5192 // SSE4.2 - CRC Instructions
5193 //===----------------------------------------------------------------------===//
5195 // No CRC instructions have AVX equivalents
5197 // CRC intrinsic instructions.
5198 // These all have the same form; the only difference is the size of r and m.
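// CRC32 accumulates a CRC-32C (Castagnoli polynomial) checksum of the 8-, 16-,
// 32- or 64-bit source operand into the destination register.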
5200 let Constraints = "$src1 = $dst" in {
5201 def CRC32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
5202 (ins GR32:$src1, i8mem:$src2),
5203 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5205 (int_x86_sse42_crc32_8 GR32:$src1,
5206 (load addr:$src2)))]>;
5207 def CRC32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
5208 (ins GR32:$src1, GR8:$src2),
5209 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5211 (int_x86_sse42_crc32_8 GR32:$src1, GR8:$src2))]>;
5212 def CRC32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5213 (ins GR32:$src1, i16mem:$src2),
5214 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5216 (int_x86_sse42_crc32_16 GR32:$src1,
5217 (load addr:$src2)))]>,
5219 def CRC32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5220 (ins GR32:$src1, GR16:$src2),
5221 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5223 (int_x86_sse42_crc32_16 GR32:$src1, GR16:$src2))]>,
5225 def CRC32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5226 (ins GR32:$src1, i32mem:$src2),
5227 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5229 (int_x86_sse42_crc32_32 GR32:$src1,
5230 (load addr:$src2)))]>;
5231 def CRC32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5232 (ins GR32:$src1, GR32:$src2),
5233 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5235 (int_x86_sse42_crc32_32 GR32:$src1, GR32:$src2))]>;
5236 def CRC64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
5237 (ins GR64:$src1, i8mem:$src2),
5238 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5240 (int_x86_sse42_crc64_8 GR64:$src1,
5241 (load addr:$src2)))]>,
5243 def CRC64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
5244 (ins GR64:$src1, GR8:$src2),
5245 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5247 (int_x86_sse42_crc64_8 GR64:$src1, GR8:$src2))]>,
5249 def CRC64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
5250 (ins GR64:$src1, i64mem:$src2),
5251 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5253 (int_x86_sse42_crc64_64 GR64:$src1,
5254 (load addr:$src2)))]>,
5256 def CRC64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
5257 (ins GR64:$src1, GR64:$src2),
5258 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5260 (int_x86_sse42_crc64_64 GR64:$src1, GR64:$src2))]>,
5264 //===----------------------------------------------------------------------===//
5265 // AES-NI Instructions
5266 //===----------------------------------------------------------------------===//
5268 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
5269 Intrinsic IntId128, bit Is2Addr = 1> {
5270 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
5271 (ins VR128:$src1, VR128:$src2),
5272 !if(Is2Addr,
5273 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5274 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5275 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5276 OpSize;
5277 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
5278 (ins VR128:$src1, i128mem:$src2),
5279 !if(Is2Addr,
5280 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5281 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5282 [(set VR128:$dst,
5283 (IntId128 VR128:$src1,
5284 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5285 }
5287 // Perform One Round of an AES Encryption/Decryption Flow
5288 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5289 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
5290 int_x86_aesni_aesenc, 0>, VEX_4V;
5291 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
5292 int_x86_aesni_aesenclast, 0>, VEX_4V;
5293 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
5294 int_x86_aesni_aesdec, 0>, VEX_4V;
5295 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
5296 int_x86_aesni_aesdeclast, 0>, VEX_4V;
5297 }
5299 let Constraints = "$src1 = $dst" in {
5300 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
5301 int_x86_aesni_aesenc>;
5302 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
5303 int_x86_aesni_aesenclast>;
5304 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
5305 int_x86_aesni_aesdec>;
5306 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
5307 int_x86_aesni_aesdeclast>;
5308 }
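// The patterns below let the v2i64 forms of the AES-NI intrinsics select the
// non-VEX AESENC*/AESDEC* instructions directly, folding a 128-bit load where
// one is present.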
5310 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
5311 (AESENCrr VR128:$src1, VR128:$src2)>;
5312 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
5313 (AESENCrm VR128:$src1, addr:$src2)>;
5314 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
5315 (AESENCLASTrr VR128:$src1, VR128:$src2)>;
5316 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
5317 (AESENCLASTrm VR128:$src1, addr:$src2)>;
5318 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
5319 (AESDECrr VR128:$src1, VR128:$src2)>;
5320 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
5321 (AESDECrm VR128:$src1, addr:$src2)>;
5322 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
5323 (AESDECLASTrr VR128:$src1, VR128:$src2)>;
5324 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
5325 (AESDECLASTrm VR128:$src1, addr:$src2)>;
5327 // Perform the AES InvMixColumn Transformation
5328 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5329 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5330 (ins VR128:$src1),
5331 "vaesimc\t{$src1, $dst|$dst, $src1}",
5332 [(set VR128:$dst,
5333 (int_x86_aesni_aesimc VR128:$src1))]>,
5334 OpSize, VEX;
5335 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5336 (ins i128mem:$src1),
5337 "vaesimc\t{$src1, $dst|$dst, $src1}",
5338 [(set VR128:$dst,
5339 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5340 OpSize, VEX;
5341 }
5342 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5343 (ins VR128:$src1),
5344 "aesimc\t{$src1, $dst|$dst, $src1}",
5345 [(set VR128:$dst,
5346 (int_x86_aesni_aesimc VR128:$src1))]>,
5347 OpSize;
5348 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5349 (ins i128mem:$src1),
5350 "aesimc\t{$src1, $dst|$dst, $src1}",
5351 [(set VR128:$dst,
5352 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5353 OpSize;
5355 // AES Round Key Generation Assist
5356 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5357 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5358 (ins VR128:$src1, i8imm:$src2),
5359 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5360 [(set VR128:$dst,
5361 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5362 OpSize, VEX;
5363 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5364 (ins i128mem:$src1, i8imm:$src2),
5365 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5366 [(set VR128:$dst,
5367 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5368 imm:$src2))]>,
5369 OpSize, VEX;
5370 }
5371 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5372 (ins VR128:$src1, i8imm:$src2),
5373 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5374 [(set VR128:$dst,
5375 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5376 OpSize;
5377 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5378 (ins i128mem:$src1, i8imm:$src2),
5379 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5380 [(set VR128:$dst,
5381 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5382 imm:$src2))]>,
5383 OpSize;
5385 //===----------------------------------------------------------------------===//
5386 // CLMUL Instructions
5387 //===----------------------------------------------------------------------===//
5389 // Only the AVX versions of the CLMUL instructions are described here.
5391 // Carry-less Multiplication instructions
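// The 8-bit immediate selects which quadword of each source participates in the
// multiply; the avx_vpclmul multiclass below defines the four fixed-selection
// mnemonics (vpclmulhqhqdq, vpclmullqlqdq, etc.) as asm-parser-only pseudo forms.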
5392 let isAsmParserOnly = 1 in {
5393 def VPCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
5394 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5395 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5396 []>;
5398 def VPCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
5399 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5400 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5401 []>;
5404 multiclass avx_vpclmul<string asm> {
5405 def rr : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
5406 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5407 []>;
5409 def rm : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
5410 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5411 []>;
5412 }
5413 defm VPCLMULHQHQDQ : avx_vpclmul<"vpclmulhqhqdq">;
5414 defm VPCLMULHQLQDQ : avx_vpclmul<"vpclmulhqlqdq">;
5415 defm VPCLMULLQHQDQ : avx_vpclmul<"vpclmullqhqdq">;
5416 defm VPCLMULLQLQDQ : avx_vpclmul<"vpclmullqlqdq">;
5418 } // isAsmParserOnly
5420 //===----------------------------------------------------------------------===//
5421 // AVX Instructions
5422 //===----------------------------------------------------------------------===//
5424 let isAsmParserOnly = 1 in {
5426 // Load from memory and broadcast to all elements of the destination operand
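// avx_broadcast is parameterized by register class, memory operand and intrinsic,
// so one class covers both the 128-bit and 256-bit VBROADCASTSS/SD/F128 forms
// defined below.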
5427 class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
5428 X86MemOperand x86memop, Intrinsic Int> :
5429 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
5430 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5431 [(set RC:$dst, (Int addr:$src))]>, VEX;
5433 def VBROADCASTSS : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
5434 int_x86_avx_vbroadcastss>;
5435 def VBROADCASTSSY : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
5436 int_x86_avx_vbroadcastss_256>;
5437 def VBROADCASTSD : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
5438 int_x86_avx_vbroadcast_sd_256>;
5439 def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
5440 int_x86_avx_vbroadcastf128_pd_256>;
5442 // Insert packed floating-point values
5443 def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
5444 (ins VR256:$src1, VR128:$src2, i8imm:$src3),
5445 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5447 def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
5448 (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
5449 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5452 // Extract packed floating-point values
5453 def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
5454 (ins VR256:$src1, i8imm:$src2),
5455 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5457 def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
5458 (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
5459 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5462 // Conditional SIMD Packed Loads and Stores
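// avx_movmask_rm builds the 128-bit (rm/mr) and 256-bit (Yrm/Ymr) masked load and
// store forms from the given load/store intrinsics; the first register operand
// carries the mask.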
5463 multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
5464 Intrinsic IntLd, Intrinsic IntLd256,
5465 Intrinsic IntSt, Intrinsic IntSt256,
5466 PatFrag pf128, PatFrag pf256> {
5467 def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
5468 (ins VR128:$src1, f128mem:$src2),
5469 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5470 [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
5471 VEX_4V;
5472 def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
5473 (ins VR256:$src1, f256mem:$src2),
5474 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5475 [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
5476 VEX_4V;
5477 def mr : AVX8I<opc_mr, MRMDestMem, (outs),
5478 (ins f128mem:$dst, VR128:$src1, VR128:$src2),
5479 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5480 [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
5481 def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
5482 (ins f256mem:$dst, VR256:$src1, VR256:$src2),
5483 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5484 [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
5485 }
5487 defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
5488 int_x86_avx_maskload_ps,
5489 int_x86_avx_maskload_ps_256,
5490 int_x86_avx_maskstore_ps,
5491 int_x86_avx_maskstore_ps_256,
5492 memopv4f32, memopv8f32>;
5493 defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
5494 int_x86_avx_maskload_pd,
5495 int_x86_avx_maskload_pd_256,
5496 int_x86_avx_maskstore_pd,
5497 int_x86_avx_maskstore_pd_256,
5498 memopv2f64, memopv4f64>;
5500 // Permute Floating-Point Values
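// avx_permil defines both the variable-control forms (rr/rm, with the permute
// control in a register or loaded from memory) and the immediate forms (ri/mi)
// of VPERMILPS/VPERMILPD for the given register class and memory operands.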
5501 multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
5502 RegisterClass RC, X86MemOperand x86memop_f,
5503 X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
5504 Intrinsic IntVar, Intrinsic IntImm> {
5505 def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
5506 (ins RC:$src1, RC:$src2),
5507 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5508 [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
5509 def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
5510 (ins RC:$src1, x86memop_i:$src2),
5511 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5512 [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;
5514 def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
5515 (ins RC:$src1, i8imm:$src2),
5516 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5517 [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
5518 def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
5519 (ins x86memop_f:$src1, i8imm:$src2),
5520 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5521 [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
5522 }
5524 defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
5525 memopv4f32, memopv4i32,
5526 int_x86_avx_vpermilvar_ps,
5527 int_x86_avx_vpermil_ps>;
5528 defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
5529 memopv8f32, memopv8i32,
5530 int_x86_avx_vpermilvar_ps_256,
5531 int_x86_avx_vpermil_ps_256>;
5532 defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
5533 memopv2f64, memopv2i64,
5534 int_x86_avx_vpermilvar_pd,
5535 int_x86_avx_vpermil_pd>;
5536 defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
5537 memopv4f64, memopv4i64,
5538 int_x86_avx_vpermilvar_pd_256,
5539 int_x86_avx_vpermil_pd_256>;
5541 def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
5542 (ins VR256:$src1, VR256:$src2, i8imm:$src3),
5543 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5545 def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
5546 (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
5547 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5550 // Zero All YMM registers
5551 def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
5552 [(int_x86_avx_vzeroall)]>, VEX, VEX_L, Requires<[HasAVX]>;
5554 // Zero Upper bits of YMM registers
5555 def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
5556 [(int_x86_avx_vzeroupper)]>, VEX, Requires<[HasAVX]>;
5558 } // isAsmParserOnly
5560 def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
5561 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5562 def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
5563 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5564 def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
5565 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5567 def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
5568 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5569 def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
5570 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5571 def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
5572 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5574 def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
5575 (VBROADCASTF128 addr:$src)>;
5577 def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
5578 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5579 def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
5580 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5581 def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
5582 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5584 def : Pat<(int_x86_avx_vperm2f128_ps_256
5585 VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
5586 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5587 def : Pat<(int_x86_avx_vperm2f128_pd_256
5588 VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
5589 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5590 def : Pat<(int_x86_avx_vperm2f128_si_256
5591 VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
5592 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5594 //===----------------------------------------------------------------------===//
5595 // SSE Shuffle pattern fragments
5596 //===----------------------------------------------------------------------===//
5598 // This is part of a "work in progress" refactoring. The idea is that all
5599 // vector shuffles will eventually be translated into target-specific nodes and
5600 // matched directly by the patterns below (which can be changed along the way).
5601 // The AVX versions of some, but not all, of these patterns are described here;
5602 // more should follow in the near future.
5604 // Shuffle with PSHUFD instruction, folding loads. The first two patterns match
5605 // SSE2 loads, which are always promoted to v2i64. The last one should match
5606 // the SSE1 case, where the only legal load is v4f32, but PSHUFD does not exist
5607 // in SSE1, so how did this ever work? Anyway, the pattern will remain here until
5608 // we investigate further.
5609 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
5610 (i8 imm:$imm))),
5611 (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>;
5612 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
5613 (i8 imm:$imm))),
5614 (PSHUFDmi addr:$src1, imm:$imm)>;
5615 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
5616 (i8 imm:$imm))),
5617 (PSHUFDmi addr:$src1, imm:$imm)>; // FIXME: has this ever worked?
5619 // Shuffle with PSHUFD instruction.
5620 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5621 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
5622 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5623 (PSHUFDri VR128:$src1, imm:$imm)>;
5625 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5626 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
5627 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5628 (PSHUFDri VR128:$src1, imm:$imm)>;
5630 // Shuffle with SHUFPD instruction.
5631 def : Pat<(v2f64 (X86Shufps VR128:$src1,
5632 (memopv2f64 addr:$src2), (i8 imm:$imm))),
5633 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5634 def : Pat<(v2f64 (X86Shufps VR128:$src1,
5635 (memopv2f64 addr:$src2), (i8 imm:$imm))),
5636 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
5638 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5639 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5640 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5641 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
5643 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5644 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5645 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5646 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
5648 // Shuffle with SHUFPS instruction.
5649 def : Pat<(v4f32 (X86Shufps VR128:$src1,
5650 (memopv4f32 addr:$src2), (i8 imm:$imm))),
5651 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5652 def : Pat<(v4f32 (X86Shufps VR128:$src1,
5653 (memopv4f32 addr:$src2), (i8 imm:$imm))),
5654 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
5656 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5657 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5658 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5659 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
5661 def : Pat<(v4i32 (X86Shufps VR128:$src1,
5662 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
5663 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5664 def : Pat<(v4i32 (X86Shufps VR128:$src1,
5665 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
5666 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
5668 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5669 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5670 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5671 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
5673 // Shuffle with MOVHLPS instruction
5674 def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
5675 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
5676 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
5677 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
5679 // Shuffle with MOVDDUP instruction
5680 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5681 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5682 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5683 (MOVDDUPrm addr:$src)>;
5685 def : Pat<(X86Movddup (bc_v4f32 (memopv2f64 addr:$src))),
5686 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5687 def : Pat<(X86Movddup (bc_v4f32 (memopv2f64 addr:$src))),
5688 (MOVDDUPrm addr:$src)>;
5690 def : Pat<(X86Movddup (memopv2i64 addr:$src)),
5691 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5692 def : Pat<(X86Movddup (memopv2i64 addr:$src)),
5693 (MOVDDUPrm addr:$src)>;
5695 def : Pat<(X86Movddup (bc_v4i32 (memopv2i64 addr:$src))),
5696 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5697 def : Pat<(X86Movddup (bc_v4i32 (memopv2i64 addr:$src))),
5698 (MOVDDUPrm addr:$src)>;
5700 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
5701 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5702 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
5703 (MOVDDUPrm addr:$src)>;
5705 def : Pat<(X86Movddup (bc_v2f64
5706 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5707 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5708 def : Pat<(X86Movddup (bc_v2f64
5709 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5710 (MOVDDUPrm addr:$src)>;
5712 // Shuffle with UNPCKLPS
5713 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
5714 (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5715 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
5716 (UNPCKLPSrm VR128:$src1, addr:$src2)>;
5718 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
5719 (VUNPCKLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5720 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
5721 (UNPCKLPSrr VR128:$src1, VR128:$src2)>;
5723 // Shuffle with UNPCKHPS
5724 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
5725 (VUNPCKHPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5726 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
5727 (UNPCKHPSrm VR128:$src1, addr:$src2)>;
5729 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
5730 (VUNPCKHPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5731 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
5732 (UNPCKHPSrr VR128:$src1, VR128:$src2)>;
5734 // Shuffle with UNPCKLPD
5735 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
5736 (VUNPCKLPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5737 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
5738 (UNPCKLPDrm VR128:$src1, addr:$src2)>;
5740 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
5741 (VUNPCKLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5742 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
5743 (UNPCKLPDrr VR128:$src1, VR128:$src2)>;
5745 // Shuffle with UNPCKHPD
5746 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
5747 (VUNPCKHPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5748 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
5749 (UNPCKHPDrm VR128:$src1, addr:$src2)>;
5751 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
5752 (VUNPCKHPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5753 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
5754 (UNPCKHPDrr VR128:$src1, VR128:$src2)>;
5756 // Shuffle with PUNPCKLBW
5757 def : Pat<(v16i8 (X86Punpcklbw VR128:$src1,
5758 (bc_v16i8 (memopv2i64 addr:$src2)))),
5759 (PUNPCKLBWrm VR128:$src1, addr:$src2)>;
5760 def : Pat<(v16i8 (X86Punpcklbw VR128:$src1, VR128:$src2)),
5761 (PUNPCKLBWrr VR128:$src1, VR128:$src2)>;
5763 // Shuffle with PUNPCKLWD
5764 def : Pat<(v8i16 (X86Punpcklwd VR128:$src1,
5765 (bc_v8i16 (memopv2i64 addr:$src2)))),
5766 (PUNPCKLWDrm VR128:$src1, addr:$src2)>;
5767 def : Pat<(v8i16 (X86Punpcklwd VR128:$src1, VR128:$src2)),
5768 (PUNPCKLWDrr VR128:$src1, VR128:$src2)>;
5770 // Shuffle with PUNPCKLDQ
5771 def : Pat<(v4i32 (X86Punpckldq VR128:$src1,
5772 (bc_v4i32 (memopv2i64 addr:$src2)))),
5773 (PUNPCKLDQrm VR128:$src1, addr:$src2)>;
5774 def : Pat<(v4i32 (X86Punpckldq VR128:$src1, VR128:$src2)),
5775 (PUNPCKLDQrr VR128:$src1, VR128:$src2)>;
5777 // Shuffle with PUNPCKLQDQ
5778 def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, (memopv2i64 addr:$src2))),
5779 (PUNPCKLQDQrm VR128:$src1, addr:$src2)>;
5780 def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)),
5781 (PUNPCKLQDQrr VR128:$src1, VR128:$src2)>;
5783 // Shuffle with PUNPCKHBW
5784 def : Pat<(v16i8 (X86Punpckhbw VR128:$src1,
5785 (bc_v16i8 (memopv2i64 addr:$src2)))),
5786 (PUNPCKHBWrm VR128:$src1, addr:$src2)>;
5787 def : Pat<(v16i8 (X86Punpckhbw VR128:$src1, VR128:$src2)),
5788 (PUNPCKHBWrr VR128:$src1, VR128:$src2)>;
5790 // Shuffle with PUNPCKHWD
5791 def : Pat<(v8i16 (X86Punpckhwd VR128:$src1,
5792 (bc_v8i16 (memopv2i64 addr:$src2)))),
5793 (PUNPCKHWDrm VR128:$src1, addr:$src2)>;
5794 def : Pat<(v8i16 (X86Punpckhwd VR128:$src1, VR128:$src2)),
5795 (PUNPCKHWDrr VR128:$src1, VR128:$src2)>;
5797 // Shuffle with PUNPCKHDQ
5798 def : Pat<(v4i32 (X86Punpckhdq VR128:$src1,
5799 (bc_v4i32 (memopv2i64 addr:$src2)))),
5800 (PUNPCKHDQrm VR128:$src1, addr:$src2)>;
5801 def : Pat<(v4i32 (X86Punpckhdq VR128:$src1, VR128:$src2)),
5802 (PUNPCKHDQrr VR128:$src1, VR128:$src2)>;
5804 // Shuffle with PUNPCKHQDQ
5805 def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, (memopv2i64 addr:$src2))),
5806 (PUNPCKHQDQrm VR128:$src1, addr:$src2)>;
5807 def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)),
5808 (PUNPCKHQDQrr VR128:$src1, VR128:$src2)>;
5810 // Shuffle with MOVLHPS
5811 def : Pat<(X86Movlhps VR128:$src1,
5812 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
5813 (MOVHPSrm VR128:$src1, addr:$src2)>;
5814 def : Pat<(X86Movlhps VR128:$src1,
5815 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
5816 (MOVHPSrm VR128:$src1, addr:$src2)>;
5817 def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
5818 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
5819 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
5820 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
5821 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
5822 (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
5824 // Shuffle with MOVLHPD
5825 def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
5826 (scalar_to_vector (loadf64 addr:$src2)))),
5827 (MOVHPDrm VR128:$src1, addr:$src2)>;
5828 // FIXME: Instead of X86Unpcklpd, there should be an X86Movlhpd here; the problem
5829 // is during lowering, where it's not possible to recognize the load fold because
5830 // it has two uses through a bitcast. One use disappears at isel time and the
5831 // fold opportunity reappears.
5832 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
5833 (scalar_to_vector (loadf64 addr:$src2)))),
5834 (MOVHPDrm VR128:$src1, addr:$src2)>;
5836 // Shuffle with MOVSS
5837 def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
5838 (MOVSSrr VR128:$src1, FR32:$src2)>;
5839 def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
5840 (MOVSSrr (v4i32 VR128:$src1),
5841 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
5842 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
5843 (MOVSSrr (v4f32 VR128:$src1),
5844 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
5845 // FIXME: Instead of an X86Movss there should be an X86Movlps here; the problem
5846 // is during lowering, where it's not possible to recognize the load fold because
5847 // it has two uses through a bitcast. One use disappears at isel time and the
5848 // fold opportunity reappears.
5849 def : Pat<(X86Movss VR128:$src1,
5850 (bc_v4i32 (v2i64 (load addr:$src2)))),
5851 (MOVLPSrm VR128:$src1, addr:$src2)>;
5853 // Shuffle with MOVSD
5854 def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
5855 (MOVSDrr VR128:$src1, FR64:$src2)>;
5856 def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
5857 (MOVSDrr (v2i64 VR128:$src1),
5858 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
5859 def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
5860 (MOVSDrr (v2f64 VR128:$src1),
5861 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
5862 def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
5863 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
5864 def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
5865 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
5867 // Shuffle with MOVSHDUP
5868 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
5869 (MOVSHDUPrr VR128:$src)>;
5870 def : Pat<(X86Movshdup (bc_v4i32 (memopv2i64 addr:$src))),
5871 (MOVSHDUPrm addr:$src)>;
5873 def : Pat<(v4f32 (X86Movshdup VR128:$src)),
5874 (MOVSHDUPrr VR128:$src)>;
5875 def : Pat<(X86Movshdup (memopv4f32 addr:$src)),
5876 (MOVSHDUPrm addr:$src)>;
5878 // Shuffle with MOVSLDUP
5879 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
5880 (MOVSLDUPrr VR128:$src)>;
5881 def : Pat<(X86Movsldup (bc_v4i32 (memopv2i64 addr:$src))),
5882 (MOVSLDUPrm addr:$src)>;
5884 def : Pat<(v4f32 (X86Movsldup VR128:$src)),
5885 (MOVSLDUPrr VR128:$src)>;
5886 def : Pat<(X86Movsldup (memopv4f32 addr:$src)),
5887 (MOVSLDUPrm addr:$src)>;
5889 // Shuffle with PSHUFHW
5890 def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
5891 (PSHUFHWri VR128:$src, imm:$imm)>;
5892 def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
5893 (PSHUFHWmi addr:$src, imm:$imm)>;
5895 // Shuffle with PSHUFLW
5896 def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
5897 (PSHUFLWri VR128:$src, imm:$imm)>;
5898 def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
5899 (PSHUFLWmi addr:$src, imm:$imm)>;
5901 // Shuffle with PALIGN
5902 def : Pat<(v1i64 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))),
5903 (PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>;
5904 def : Pat<(v2i32 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))),
5905 (PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>;
5906 def : Pat<(v4i16 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))),
5907 (PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>;
5908 def : Pat<(v8i8 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))),
5909 (PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>;
5911 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5912 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5913 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5914 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5915 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5916 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5917 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5918 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5920 // Shuffle with MOVLPS
5921 def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
5922 (MOVLPSrm VR128:$src1, addr:$src2)>;
5923 def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
5924 (MOVLPSrm VR128:$src1, addr:$src2)>;
5925 def : Pat<(X86Movlps VR128:$src1,
5926 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
5927 (MOVLPSrm VR128:$src1, addr:$src2)>;
5929 // Shuffle with MOVLPD
5930 def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
5931 (MOVLPDrm VR128:$src1, addr:$src2)>;
5932 def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
5933 (MOVLPDrm VR128:$src1, addr:$src2)>;
5934 def : Pat<(v2f64 (X86Movlpd VR128:$src1,
5935 (scalar_to_vector (loadf64 addr:$src2)))),
5936 (MOVLPDrm VR128:$src1, addr:$src2)>;
5938 // Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD
5939 def : Pat<(store (f64 (vector_extract
5940 (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))),addr:$dst),
5941 (MOVHPSmr addr:$dst, VR128:$src)>;
5942 def : Pat<(store (f64 (vector_extract
5943 (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))),addr:$dst),
5944 (MOVHPDmr addr:$dst, VR128:$src)>;
5946 def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),addr:$src1),
5947 (MOVLPSmr addr:$src1, VR128:$src2)>;
5948 def : Pat<(store (v4i32 (X86Movlps
5949 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
5950 (MOVLPSmr addr:$src1, VR128:$src2)>;
5952 def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
5953 (MOVLPDmr addr:$src1, VR128:$src2)>;
5954 def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
5955 (MOVLPDmr addr:$src1, VR128:$src2)>;