//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
// SSE scalar FP Instructions
//===----------------------------------------------------------------------===//

// CMOV* - Used to implement the SSE SELECT DAG operation.  Expanded after
// instruction selection into a branch sequence.
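// NOTE (editorial, not from the original source): because usesCustomInserter
// is set, these pseudos are expected to be lowered by the target's
// EmitInstrWithCustomInserter hook (for X86, in X86ISelLowering.cpp) into a
// compare-and-branch diamond after instruction selection; nothing in this
// .td file performs that expansion.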
let Uses = [EFLAGS], usesCustomInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V4F32 PSEUDO!",
                    [(set VR128:$dst,
                      (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2F64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2I64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instruction Classes
//===----------------------------------------------------------------------===//
/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           bit Is2Addr = 1> {
  let isCommutable = 1 in {
    def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
}
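// Illustrative use only (the defm name below is hypothetical, not taken from
// this file): an instantiation such as
//   defm ADD : sse12_fp_scalar<0x58, "addss", fadd, FR32, f32mem>;
// would expand to ADDrr/ADDrm, with Is2Addr (default 1) choosing between the
// two-operand "$src2, $dst" form and the three-operand AVX-style string.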
/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat,
                               bit Is2Addr = 1> {
  def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
                       !strconcat(SSEVer, !strconcat("_",
                         !strconcat(OpcodeStr, FPSizeStr))))
             RC:$src1, RC:$src2))]>;
  def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
                       !strconcat(SSEVer, !strconcat("_",
                         !strconcat(OpcodeStr, FPSizeStr))))
             RC:$src1, mem_cpat:$src2))]>;
}
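// Editorial note on the name concatenation above: !nameconcat builds the
// intrinsic record name as "int_x86_sse" ## SSEVer ## "_" ## OpcodeStr ##
// FPSizeStr, so an (illustrative) instantiation with SSEVer "2", OpcodeStr
// "add" and FPSizeStr "sd" would resolve to int_x86_sse2_add_sd.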
/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
}
/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                 string OpcodeStr, X86MemOperand x86memop,
                                 list<dag> pat_rr, list<dag> pat_rm,
                                 bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rr, d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     pat_rm, d>;
}
/// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               X86MemOperand x86memop, PatFrag mem_frag,
                               Domain d, bit Is2Addr = 1> {
  def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
                       !strconcat(SSEVer, !strconcat("_",
                         !strconcat(OpcodeStr, FPSizeStr))))
             RC:$src1, RC:$src2))], d>;
  def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
                       !strconcat(SSEVer, !strconcat("_",
                         !strconcat(OpcodeStr, FPSizeStr))))
             RC:$src1, (mem_frag addr:$src2)))], d>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Instructions
//===----------------------------------------------------------------------===//

class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
      SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
         [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;

// Loading from memory automatically zeroes the upper bits.
class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
                    PatFrag mem_pat, string OpcodeStr> :
      SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (mem_pat addr:$src))]>;

// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
// is used instead. Register-to-register movss/movsd is not modeled as an
// INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
// in terms of a copy, and, as just mentioned, we don't use movss/movsd for copies.
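// (Editorial addition: a reg-reg movss/movsd writes only the low element and
// therefore carries a false dependency on the old upper elements of the
// destination; that is the "partial register update" referred to above, and
// the full-width FsMOVAPSrr/FsMOVAPDrr copies avoid it.)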
let isAsmParserOnly = 1 in {
  def VMOVSSrr : sse12_move_rr<FR32, v4f32,
                  "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
  def VMOVSDrr : sse12_move_rr<FR64, v2f64,
                  "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;

  let canFoldAsLoad = 1, isReMaterializable = 1 in {
    def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;

    let AddedComplexity = 20 in
      def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
  }
}

let Constraints = "$src1 = $dst" in {
  def MOVSSrr : sse12_move_rr<FR32, v4f32,
                  "movss\t{$src2, $dst|$dst, $src2}">, XS;
  def MOVSDrr : sse12_move_rr<FR64, v2f64,
                  "movsd\t{$src2, $dst|$dst, $src2}">, XD;
}

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;

  let AddedComplexity = 20 in
    def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
}
let AddedComplexity = 15 in {
// Extract the low 32-bit value from one vector and insert it into another.
def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4f32 VR128:$src1),
                   (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
// Extract the low 64-bit value from one vector and insert it into another.
def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2f64 VR128:$src1),
                   (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
}

// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
let AddedComplexity = 20 in {
// MOVSSrm zeros the high parts of the register; represent this
// with SUBREG_TO_REG.
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
// MOVSDrm zeros the high parts of the register; represent this
// with SUBREG_TO_REG.
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzload addr:$src)),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
}
// Store scalar value to memory.
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>;
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>;

let isAsmParserOnly = 1 in {
def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>, XS, VEX;
def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>, XD, VEX;
}

// Extract and store.
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSSmr addr:$dst,
                   (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSDmr addr:$dst,
                   (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
// Move Aligned/Unaligned floating point values
multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            bit IsReMaterializable = 1> {
let neverHasSideEffects = 1 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], d>;
}
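// How the expansion works (editorial note): each defm below, e.g.
//   defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
//                                  "movaps", SSEPackedSingle>, TB;
// produces a pattern-less MOVAPSrr register form (hence neverHasSideEffects)
// plus a MOVAPSrm load form that may be folded or rematerialized.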
let isAsmParserOnly = 1 in {
defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                              "movaps", SSEPackedSingle>, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                              "movapd", SSEPackedDouble>, OpSize, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                              "movups", SSEPackedSingle>, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                              "movupd", SSEPackedDouble, 0>, OpSize, VEX;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
                              "movaps", SSEPackedSingle>, VEX;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
                              "movapd", SSEPackedDouble>, OpSize, VEX;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
                              "movups", SSEPackedSingle>, VEX;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                              "movupd", SSEPackedDouble, 0>, OpSize, VEX;
}
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                              "movaps", SSEPackedSingle>, TB;
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                              "movapd", SSEPackedDouble>, TB, OpSize;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                              "movups", SSEPackedSingle>, TB;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                              "movupd", SSEPackedDouble, 0>, TB, OpSize;
let isAsmParserOnly = 1 in {
def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
}
def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>;
// Intrinsic forms of MOVUPS/D load and store
let isAsmParserOnly = 1 in {
  let canFoldAsLoad = 1, isReMaterializable = 1 in
  def VMOVUPSrm_Int : VPSI<0x10, MRMSrcMem, (outs VR128:$dst),
                           (ins f128mem:$src),
                           "movups\t{$src, $dst|$dst, $src}",
                           [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>, VEX;
  def VMOVUPDrm_Int : VPDI<0x10, MRMSrcMem, (outs VR128:$dst),
                           (ins f128mem:$src),
                           "movupd\t{$src, $dst|$dst, $src}",
                           [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>, VEX;
  def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
                           (ins f128mem:$dst, VR128:$src),
                           "movups\t{$src, $dst|$dst, $src}",
                           [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
  def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
                           (ins f128mem:$dst, VR128:$src),
                           "movupd\t{$src, $dst|$dst, $src}",
                           [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
}

let canFoldAsLoad = 1, isReMaterializable = 1 in
def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;

def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
// Move Low/High packed floating point values
multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
                                 PatFrag mov_frag, string base_opc,
                                 string asm_opr> {
  def PSrm : PI<opc, MRMSrcMem,
         (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
         !strconcat(!strconcat(base_opc,"s"), asm_opr),
     [(set RC:$dst,
       (mov_frag RC:$src1,
          (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
      SSEPackedSingle>, TB;

  def PDrm : PI<opc, MRMSrcMem,
         (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
         !strconcat(!strconcat(base_opc,"d"), asm_opr),
     [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
                            (scalar_to_vector (loadf64 addr:$src2)))))],
      SSEPackedDouble>, TB, OpSize;
}
let isAsmParserOnly = 1, AddedComplexity = 20 in {
  defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
  defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                     "\t{$src2, $dst|$dst, $src2}">;
  defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                     "\t{$src2, $dst|$dst, $src2}">;
}
let isAsmParserOnly = 1 in {
def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>, VEX;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>, VEX;
}
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>;
// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
let isAsmParserOnly = 1 in {
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>,
                   VEX;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>,
                   VEX;
}
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>;
let isAsmParserOnly = 1, AddedComplexity = 20 in {
  def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
                      VEX_4V;
  def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
                      VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
  def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
}
def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
          (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
let AddedComplexity = 20 in {
  def : Pat<(v4f32 (movddup VR128:$src, (undef))),
            (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
  def : Pat<(v2i64 (movddup VR128:$src, (undef))),
            (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Conversion Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                     SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                     string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
                        [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
                        [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
}

multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm, []>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm, []>;
}

multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                         SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                         string asm, Domain d> {
  def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
                        [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
  def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
                        [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
}

multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
}
let isAsmParserOnly = 1 in {
defm VCVTTSS2SI   : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                                "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTTSS2SIr64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                                "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
                                VEX_W;
defm VCVTTSD2SI   : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                                "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
defm VCVTTSD2SIr64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                                "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX,
                                VEX_W;

// The assembler can recognize rr 64-bit instructions by seeing a rxx
// register, but the same isn't true when only using memory operands. Provide
// other assembly "l" and "q" forms to make the operand size explicit where
// that is needed.
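// For example, the cvtsi2sdl/cvtsi2sdq forms defined just below spell out the
// 32-bit vs. 64-bit integer source size that a bare memory operand cannot
// convey on its own.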
defm VCVTSI2SS   : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
                                  VEX_4V;
defm VCVTSI2SSQ  : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ssq">, XS,
                                  VEX_4V, VEX_W;
defm VCVTSI2SD   : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
                                  VEX_4V;
defm VCVTSI2SDL  : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sdl">, XD,
                                  VEX_4V;
defm VCVTSI2SDQ  : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sdq">, XD,
                                  VEX_4V, VEX_W;
}

defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                      "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
defm CVTSI2SS  : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
                      "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
defm CVTSI2SD  : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
                      "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
// Conversion Instructions Intrinsics - Match intrinsics which expect MM
// and/or XMM operand(s).
multiclass sse12_cvt_pint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                         Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
                         string asm, Domain d> {
  def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
                        [(set DstRC:$dst, (Int SrcRC:$src))], d>;
  def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
                        [(set DstRC:$dst, (Int (ld_frag addr:$src)))], d>;
}

multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                         Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
                         string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
                        [(set DstRC:$dst, (Int SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
                        [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
}

multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC,
                    RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
                    PatFrag ld_frag, string asm, Domain d> {
  def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
              asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))], d>;
  def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src2), asm,
              [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))], d>;
}

multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
                    RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
                    PatFrag ld_frag, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
              asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src2), asm,
              [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
}
let isAsmParserOnly = 1 in {
defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
                      f32mem, load, "cvtss2si\t{$src, $dst|$dst, $src}">, XS,
                      VEX;
defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                      f128mem, load, "cvtsd2si\t{$src, $dst|$dst, $src}">, XD,
                      VEX;
// FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_
// prefixes. Get rid of this hack or rename the intrinsics; there are several
// instructions that only match with the intrinsic form, so why create
// duplicates just to let them be recognized by the assembler?
defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
                      "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
defm VCVTSD2SI64   : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
                      "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
}
defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
                      f32mem, load, "cvtss2si\t{$src, $dst|$dst, $src}">, XS;
defm Int_CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                      f128mem, load, "cvtsd2si\t{$src, $dst|$dst, $src}">, XD;

let Constraints = "$src1 = $dst" in {
  defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse_cvtsi2ss, i32mem, loadi32,
                        "cvtsi2ss\t{$src2, $dst|$dst, $src2}">, XS;
  defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse2_cvtsi2sd, i32mem, loadi32,
                        "cvtsi2sd\t{$src2, $dst|$dst, $src2}">, XD;
}
// Instructions below don't have an AVX form.
defm Int_CVTPS2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi,
                      f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}",
                      SSEPackedSingle>, TB;
defm Int_CVTPD2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtpd2pi,
                      f128mem, memop, "cvtpd2pi\t{$src, $dst|$dst, $src}",
                      SSEPackedDouble>, TB, OpSize;
defm Int_CVTTPS2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttps2pi,
                      f64mem, load, "cvttps2pi\t{$src, $dst|$dst, $src}",
                      SSEPackedSingle>, TB;
defm Int_CVTTPD2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttpd2pi,
                      f128mem, memop, "cvttpd2pi\t{$src, $dst|$dst, $src}",
                      SSEPackedDouble>, TB, OpSize;
defm Int_CVTPI2PD : sse12_cvt_pint<0x2A, VR64, VR128, int_x86_sse_cvtpi2pd,
                      i64mem, load, "cvtpi2pd\t{$src, $dst|$dst, $src}",
                      SSEPackedDouble>, TB, OpSize;
let Constraints = "$src1 = $dst" in {
  defm Int_CVTPI2PS : sse12_cvt_pint_3addr<0x2A, VR64, VR128,
                        int_x86_sse_cvtpi2ps,
                        i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                        SSEPackedSingle>, TB;
}
// Aliases for intrinsics
let isAsmParserOnly = 1, Pattern = []<dag> in {
defm Int_VCVTTSS2SI : sse12_cvt_sint_3addr<0x2C, VR128, GR32,
                int_x86_sse_cvttss2si, f32mem, load,
                "cvttss2si\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS;
defm Int_VCVTTSD2SI : sse12_cvt_sint_3addr<0x2C, VR128, GR32,
                int_x86_sse2_cvttsd2si, f128mem, load,
                "cvttsd2si\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD;
}
defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                          f32mem, load, "cvttss2si\t{$src, $dst|$dst, $src}">,
                          XS;
defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                          f128mem, load, "cvttsd2si\t{$src, $dst|$dst, $src}">,
                          XD;
let isAsmParserOnly = 1, Pattern = []<dag> in {
defm VCVTSS2SI   : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
                          "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
                          "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
                          VEX_W;
defm VCVTDQ2PS   : sse12_cvt_p<0x5B, VR128, VR128, undef, f128mem, load,
                          "cvtdq2ps\t{$src, $dst|$dst, $src}",
                          SSEPackedSingle>, TB, VEX;
defm VCVTDQ2PSY  : sse12_cvt_p<0x5B, VR256, VR256, undef, f256mem, load,
                          "cvtdq2ps\t{$src, $dst|$dst, $src}",
                          SSEPackedSingle>, TB, VEX;
}
let Pattern = []<dag> in {
defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
                          "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, f128mem, load /*dummy*/,
                          "cvtdq2ps\t{$src, $dst|$dst, $src}",
                          SSEPackedSingle>, TB; /* PD SSE3 form is available */
}
// Convert scalar double to scalar single
let isAsmParserOnly = 1 in {
def VCVTSD2SSrr  : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
                       (ins FR64:$src1, FR64:$src2),
                      "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                      VEX_4V;
def VCVTSD2SSrm  : I<0x5A, MRMSrcMem, (outs FR32:$dst),
                     (ins FR64:$src1, f64mem:$src2),
                     "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
}
def CVTSD2SSrr  : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround FR64:$src))]>;
def CVTSD2SSrm  : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
                      Requires<[HasSSE2, OptForSize]>;

let isAsmParserOnly = 1 in
defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
                    int_x86_sse2_cvtsd2ss, f64mem, load,
                    "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}">,
                    XS, VEX_4V;
let Constraints = "$src1 = $dst" in
defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
                    int_x86_sse2_cvtsd2ss, f64mem, load,
                    "cvtsd2ss\t{$src2, $dst|$dst, $src2}">, XS;
// Convert scalar single to scalar double
let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
                    (ins FR32:$src1, FR32:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XS, Requires<[HasAVX]>, VEX_4V;
def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
                    (ins FR32:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;
}
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (fextend FR32:$src))]>, XS,
                   Requires<[HasSSE2]>;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
                   Requires<[HasSSE2, OptForSize]>;
let isAsmParserOnly = 1 in {
def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       VR128:$src2))]>, XS, VEX_4V,
                    Requires<[HasAVX]>;
def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       (load addr:$src2)))]>, XS, VEX_4V,
                    Requires<[HasAVX]>;
}
let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       VR128:$src2))]>, XS,
                    Requires<[HasSSE2]>;
def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                    "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       (load addr:$src2)))]>, XS,
                    Requires<[HasSSE2]>;
}

def : Pat<(extloadf32 addr:$src),
          (CVTSS2SDrr (MOVSSrm addr:$src))>,
      Requires<[HasSSE2, OptForSpeed]>;
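// Editorial note: with the pattern above, an extending f32 load is selected as
// a plain MOVSS load followed by a register CVTSS2SD when optimizing for speed
// (OptForSpeed); the memory-folded CVTSS2SDrm form defined earlier is
// restricted to OptForSize.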
// Convert doubleword to packed single/double fp
let isAsmParserOnly = 1 in { // SSE2 instructions without OpSize prefix
def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                       TB, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                      TB, VEX, Requires<[HasAVX]>;
}
def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                       TB, Requires<[HasSSE2]>;
def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "cvtdq2ps\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                      TB, Requires<[HasSSE2]>;
// FIXME: why is the non-intrinsic version described as SSE3?
let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                     XS, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, VEX, Requires<[HasAVX]>;
}
def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                     XS, Requires<[HasSSE2]>;
def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, Requires<[HasSSE2]>;
// Convert packed single/double fp to doubleword
let isAsmParserOnly = 1 in {
def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
}
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
let isAsmParserOnly = 1 in {
def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
                        VEX;
def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
                         (ins f128mem:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                            (memop addr:$src)))]>, VEX;
}
def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                           (memop addr:$src)))]>;
let isAsmParserOnly = 1 in { // SSE2 packed instructions with XD prefix
def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                     XD, VEX, Requires<[HasAVX]>;
def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (memop addr:$src)))]>,
                     XD, VEX, Requires<[HasAVX]>;
}
def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                     XD, Requires<[HasSSE2]>;
def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (memop addr:$src)))]>,
                     XD, Requires<[HasSSE2]>;
// Convert with truncation packed single/double fp to doubleword
let isAsmParserOnly = 1 in { // SSE2 packed instructions with XS prefix
def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
}
def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}", []>;
def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}", []>;
let isAsmParserOnly = 1 in {
def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vcvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                              (int_x86_sse2_cvttps2dq VR128:$src))]>,
                      XS, VEX, Requires<[HasAVX]>;
def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "vcvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttps2dq
                                           (memop addr:$src)))]>,
                      XS, VEX, Requires<[HasAVX]>;
}
def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                              (int_x86_sse2_cvttps2dq VR128:$src))]>,
                      XS, Requires<[HasSSE2]>;
def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttps2dq
                                           (memop addr:$src)))]>,
                      XS, Requires<[HasSSE2]>;
let isAsmParserOnly = 1 in {
def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
                            (ins VR128:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
                       VEX;
def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
                            (ins f128mem:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                             (memop addr:$src)))]>, VEX;
}
def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                             (memop addr:$src)))]>;
let isAsmParserOnly = 1 in {
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                       "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                      "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                      "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
}
// Convert packed single to packed double
let isAsmParserOnly = 1, Predicates = [HasAVX] in {
                  // SSE2 instructions without OpSize prefix
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
}
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
let isAsmParserOnly = 1 in {
def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                     VEX, Requires<[HasAVX]>;
def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                     VEX, Requires<[HasAVX]>;
}
def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                     TB, Requires<[HasSSE2]>;
// Convert packed double to packed single
let isAsmParserOnly = 1 in {
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                       "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                      "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                      "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
}
def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
let isAsmParserOnly = 1 in {
def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
                           (ins f128mem:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                            (memop addr:$src)))]>;
}
def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                            (memop addr:$src)))]>;
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Compare Instructions
//===----------------------------------------------------------------------===//

// sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
                            string asm, string asm_alt> {
  def rr : SIi8<0xC2, MRMSrcReg,
                (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
                asm, []>;
  def rm : SIi8<0xC2, MRMSrcMem,
                (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
                asm, []>;
  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1 in {
    def rr_alt : SIi8<0xC2, MRMSrcReg,
                  (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
                  asm_alt, []>;
    def rm_alt : SIi8<0xC2, MRMSrcMem,
                  (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
                  asm_alt, []>;
  }
}
let neverHasSideEffects = 1, isAsmParserOnly = 1 in {
  defm VCMPSS  : sse12_cmp_scalar<FR32, f32mem,
                  "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
                  "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
                  XS, VEX_4V;
  defm VCMPSD  : sse12_cmp_scalar<FR64, f64mem,
                  "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
                  "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
                  XD, VEX_4V;
}

let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
  defm CMPSS  : sse12_cmp_scalar<FR32, f32mem,
                    "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                    "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}">, XS;
  defm CMPSD  : sse12_cmp_scalar<FR64, f64mem,
                    "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                    "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}">, XD;
}
multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
                         Intrinsic Int, string asm> {
  def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
                        [(set VR128:$dst, (Int VR128:$src1,
                                               VR128:$src, imm:$cc))]>;
  def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
                      (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
                        [(set VR128:$dst, (Int VR128:$src1,
                                               (load addr:$src), imm:$cc))]>;
}

// Aliases to match intrinsics which expect XMM operand(s).
let isAsmParserOnly = 1 in {
  defm Int_VCMPSS  : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
                       "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
                       XS, VEX_4V;
  defm Int_VCMPSD  : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
                       "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
                       XD, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
  defm Int_CMPSS  : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
                       "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
  defm Int_CMPSD  : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
                       "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
}
// sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
                            ValueType vt, X86MemOperand x86memop,
                            PatFrag ld_frag, string OpcodeStr, Domain d> {
  def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
                     [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
  def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
                     [(set EFLAGS, (OpNode (vt RC:$src1),
                                           (ld_frag addr:$src2)))], d>;
}
let Defs = [EFLAGS] in {
  let isAsmParserOnly = 1 in {
    defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
                                    "ucomiss", SSEPackedSingle>, VEX;
    defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
                                    "ucomisd", SSEPackedDouble>, OpSize, VEX;
    let Pattern = []<dag> in {
      defm VCOMISS  : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
                                      "comiss", SSEPackedSingle>, VEX;
      defm VCOMISD  : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
                                      "comisd", SSEPackedDouble>, OpSize, VEX;
    }

    defm Int_VUCOMISS  : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
                              load, "ucomiss", SSEPackedSingle>, VEX;
    defm Int_VUCOMISD  : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
                              load, "ucomisd", SSEPackedDouble>, OpSize, VEX;

    defm Int_VCOMISS  : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
                              load, "comiss", SSEPackedSingle>, VEX;
    defm Int_VCOMISD  : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
                              load, "comisd", SSEPackedDouble>, OpSize, VEX;
  }
  defm UCOMISS  : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
                                  "ucomiss", SSEPackedSingle>, TB;
  defm UCOMISD  : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
                                  "ucomisd", SSEPackedDouble>, TB, OpSize;

  let Pattern = []<dag> in {
    defm COMISS  : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
                                    "comiss", SSEPackedSingle>, TB;
    defm COMISD  : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
                                    "comisd", SSEPackedDouble>, TB, OpSize;
  }

  defm Int_UCOMISS  : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
                            load, "ucomiss", SSEPackedSingle>, TB;
  defm Int_UCOMISD  : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
                            load, "ucomisd", SSEPackedDouble>, TB, OpSize;

  defm Int_COMISS  : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
                                      "comiss", SSEPackedSingle>, TB;
  defm Int_COMISD  : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
                                      "comisd", SSEPackedDouble>, TB, OpSize;
} // Defs = [EFLAGS]
// sse12_cmp_packed - sse 1 & 2 compare packed instructions
multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
                            Intrinsic Int, string asm, string asm_alt,
                            Domain d> {
  def rri : PIi8<0xC2, MRMSrcReg,
             (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
             [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
  def rmi : PIi8<0xC2, MRMSrcMem,
             (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
             [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1 in {
    def rri_alt : PIi8<0xC2, MRMSrcReg,
               (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
               asm_alt, [], d>;
    def rmi_alt : PIi8<0xC2, MRMSrcMem,
               (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
               asm_alt, [], d>;
  }
}
let isAsmParserOnly = 1 in {
  defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
                 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
                 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
                 SSEPackedSingle>, VEX_4V;
  defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
                 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
                 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
                 SSEPackedDouble>, OpSize, VEX_4V;
  let Pattern = []<dag> in {
    defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_sse_cmp_ps,
                   "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
                   "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
                   SSEPackedSingle>, VEX_4V;
    defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_sse2_cmp_pd,
                   "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
                   "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
                   SSEPackedDouble>, OpSize, VEX_4V;
  }
}
let Constraints = "$src1 = $dst" in {
  defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
                 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
                 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
                 SSEPackedSingle>, TB;
  defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
                 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
                 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
                 SSEPackedDouble>, TB, OpSize;
}
def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
          (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
          (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
          (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
          (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Shuffle Instructions
//===----------------------------------------------------------------------===//

/// sse12_shuffle - sse 1 & 2 shuffle instructions
multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
                         ValueType vt, string asm, PatFrag mem_frag,
                         Domain d, bit IsConvertibleToThreeAddress = 0> {
  def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
                   [(set RC:$dst, (vt (shufp:$src3
                                  RC:$src1, (mem_frag addr:$src2))))], d>;
  let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
    def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
                     (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
                     [(set RC:$dst,
                       (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
}
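// Editorial note on the $src3 immediate (standard SHUFPS semantics, not
// original text): the two low bit pairs select elements of $src1 for the low
// result lanes and the two high bit pairs select elements of $src2 for the
// high lanes; e.g. an immediate of 0 gives { src1[0], src1[0], src2[0],
// src2[0] }.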
1280 let isAsmParserOnly = 1 in {
1281 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1282 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1283 memopv4f32, SSEPackedSingle>, VEX_4V;
1284 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
1285 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1286 memopv8f32, SSEPackedSingle>, VEX_4V;
1287 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1288 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1289 memopv2f64, SSEPackedDouble>, OpSize, VEX_4V;
1290 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
1291 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1292 memopv4f64, SSEPackedDouble>, OpSize, VEX_4V;
1295 let Constraints = "$src1 = $dst" in {
1296 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1297 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1298 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>, TB;
1300 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1301 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1302 memopv2f64, SSEPackedDouble>, TB, OpSize;
1305 //===----------------------------------------------------------------------===//
1306 // SSE 1 & 2 - Unpack Instructions
1307 //===----------------------------------------------------------------------===//
1309 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1310 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1311 PatFrag mem_frag, RegisterClass RC,
1312 X86MemOperand x86memop, string asm,
1314 def rr : PI<opc, MRMSrcReg,
1315 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1317 asm, [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
1318 def rm : PI<opc, MRMSrcMem,
1319 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1321 asm, [(set RC:$dst, (vt (OpNode RC:$src1,
1322 (mem_frag addr:$src2))))], d>;
1325 let AddedComplexity = 10 in {
1326 let isAsmParserOnly = 1 in {
1327 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1328 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1329 SSEPackedSingle>, VEX_4V;
1330 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1331 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1332 SSEPackedDouble>, OpSize, VEX_4V;
1333 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1334 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1335 SSEPackedSingle>, VEX_4V;
1336 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1337 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1338 SSEPackedDouble>, OpSize, VEX_4V;
1340 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1341 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1342 SSEPackedSingle>, VEX_4V;
1343 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1344 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1345 SSEPackedDouble>, OpSize, VEX_4V;
1346 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1347 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1348 SSEPackedSingle>, VEX_4V;
1349 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1350 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1351 SSEPackedDouble>, OpSize, VEX_4V;
1354 let Constraints = "$src1 = $dst" in {
1355 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1356 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1357 SSEPackedSingle>, TB;
1358 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1359 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1360 SSEPackedDouble>, TB, OpSize;
1361 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1362 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1363 SSEPackedSingle>, TB;
1364 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1365 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1366 SSEPackedDouble>, TB, OpSize;
1367 } // Constraints = "$src1 = $dst"
1368 } // AddedComplexity
1370 //===----------------------------------------------------------------------===//
1371 // SSE 1 & 2 - Extract Floating-Point Sign Mask
1372 //===----------------------------------------------------------------------===//
1374 /// sse12_extr_sign_mask - sse 1 & 2 sign mask extraction
1375 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1377 def rr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1378 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1379 [(set GR32:$dst, (Int RC:$src))], d>;
1383 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1384 SSEPackedSingle>, TB;
1385 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1386 SSEPackedDouble>, TB, OpSize;
1388 let isAsmParserOnly = 1 in {
1389 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1390 "movmskps", SSEPackedSingle>, VEX;
1391 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1392 "movmskpd", SSEPackedDouble>, OpSize, VEX;
1395 // FIXME: merge with multiclass above when the intrinsics come.
1396 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1397 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1398 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1399 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize, VEX;
1402 def VMOVMSKPSYrr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src),
1403 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1404 def VMOVMSKPDYrr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src),
1405 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize, VEX;
1408 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1409 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1410 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1411 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize, VEX;
1415 //===----------------------------------------------------------------------===//
1416 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1417 //===----------------------------------------------------------------------===//
1419 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1420 // names that start with 'Fs'.
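// (x86 provides no scalar FP logical instructions, so scalar operations such
// as fabs and fneg are implemented with these whole-register packed forms.)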
1422 // Alias instructions that map fld0 to pxor for sse.
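// XORing a register with itself produces all-zero bits, which is +0.0 in both
// single and double precision, so the FP "load zero" maps onto an integer xor.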
1423 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1424 canFoldAsLoad = 1 in {
1425 // FIXME: Set encoding to pseudo!
1426 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1427 [(set FR32:$dst, fp32imm0)]>,
1428 Requires<[HasSSE1]>, TB, OpSize;
1429 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1430 [(set FR64:$dst, fpimm0)]>,
1431 Requires<[HasSSE2]>, TB, OpSize;
1434 // Alias instructions to do FR32 or FR64 reg-to-reg copies using movaps or
1435 // movapd. Upper bits are disregarded.
1436 let neverHasSideEffects = 1 in {
1437 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1438 "movaps\t{$src, $dst|$dst, $src}", []>;
1439 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1440 "movapd\t{$src, $dst|$dst, $src}", []>;
1443 // Alias instructions to load FR32 or FR64 from f128mem using movaps or
1444 // movapd. Upper bits are disregarded.
1445 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1446 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1447 "movaps\t{$src, $dst|$dst, $src}",
1448 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
1449 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1450 "movapd\t{$src, $dst|$dst, $src}",
1451 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1454 //===----------------------------------------------------------------------===//
1455 // SSE 1 & 2 - Logical Instructions
1456 //===----------------------------------------------------------------------===//
1458 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
1460 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
1462 let isAsmParserOnly = 1 in {
1463 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
1464 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, VEX_4V;
1466 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
1467 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, OpSize, VEX_4V;
1470 let Constraints = "$src1 = $dst" in {
1471 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
1472 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
1474 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
1475 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
1479 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1480 let mayLoad = 0 in {
1481 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
1482 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
1483 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
1486 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
1487 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
1489 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
1491 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
1492 SDNode OpNode, int HasPat = 0,
1493 list<list<dag>> Pattern = []> {
1494 let isAsmParserOnly = 1, Pattern = []<dag> in {
1495 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1496 !strconcat(OpcodeStr, "ps"), f128mem,
1497 !if(HasPat, Pattern[0], // rr
1498 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1500 !if(HasPat, Pattern[2], // rm
1501 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1502 (memopv2i64 addr:$src2)))]), 0>,
1505 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1506 !strconcat(OpcodeStr, "pd"), f128mem,
1507 !if(HasPat, Pattern[1], // rr
1508 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1511 !if(HasPat, Pattern[3], // rm
1512 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1513 (memopv2i64 addr:$src2)))]), 0>,
1516 let Constraints = "$src1 = $dst" in {
1517 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1518 !strconcat(OpcodeStr, "ps"), f128mem,
1519 !if(HasPat, Pattern[0], // rr
1520 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1522 !if(HasPat, Pattern[2], // rm
1523 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1524 (memopv2i64 addr:$src2)))])>, TB;
1526 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1527 !strconcat(OpcodeStr, "pd"), f128mem,
1528 !if(HasPat, Pattern[1], // rr
1529 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1532 !if(HasPat, Pattern[3], // rm
1533 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1534 (memopv2i64 addr:$src2)))])>,
1539 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
1541 let isAsmParserOnly = 1 in {
1542 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr> {
1543 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
1544 !strconcat(OpcodeStr, "ps"), f256mem, [], [], 0>, VEX_4V;
1546 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
1547 !strconcat(OpcodeStr, "pd"), f256mem, [], [], 0>, OpSize, VEX_4V;
1551 // AVX 256-bit packed logical ops forms
1552 defm VAND : sse12_fp_packed_logical_y<0x54, "and">;
1553 defm VOR : sse12_fp_packed_logical_y<0x56, "or">;
1554 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor">;
1555 let isCommutable = 0 in
1556 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn">;
1558 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
1559 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
1560 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
1561 let isCommutable = 0 in
1562 defm ANDN : sse12_fp_packed_logical<0x55, "andn", undef /* dummy */, 1, [
1564 [(set VR128:$dst, (v2i64 (and (xor VR128:$src1,
1565 (bc_v2i64 (v4i32 immAllOnesV))),
1568 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1569 (bc_v2i64 (v2f64 VR128:$src2))))],
1571 [(set VR128:$dst, (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
1572 (bc_v2i64 (v4i32 immAllOnesV))),
1573 (memopv2i64 addr:$src2))))],
1575 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1576 (memopv2i64 addr:$src2)))]]>;
1578 //===----------------------------------------------------------------------===//
1579 // SSE 1 & 2 - Arithmetic Instructions
1580 //===----------------------------------------------------------------------===//
1582 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and vector forms.
1585 /// In addition, we also have a special variant of the scalar form here to
1586 /// represent the associated intrinsic operation. This form is unlike the
1587 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1588 /// and leaves the top elements unmodified (therefore these cannot be commuted).
1590 /// These three forms can each be reg+reg or reg+mem.
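/// For example (AT&T syntax), "addss %xmm1, %xmm0" adds only element 0 and
/// passes the upper three elements of %xmm0 through unchanged.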
1592 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
1594 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
1595 OpNode, FR32, f32mem, Is2Addr>, XS;
1596 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
1597 OpNode, FR64, f64mem, Is2Addr>, XD;
1600 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
1602 let mayLoad = 0 in {
1603 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
1604 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
1605 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
1606 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
1610 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
1612 let mayLoad = 0 in {
1613 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
1614 v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
1615 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
1616 v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
1620 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
1622 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1623 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
1624 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1625 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
1628 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
1630 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1631 !strconcat(OpcodeStr, "ps"), "", "_ps", f128mem, memopv4f32,
1632 SSEPackedSingle, Is2Addr>, TB;
1634 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1635 !strconcat(OpcodeStr, "pd"), "2", "_pd", f128mem, memopv2f64,
1636 SSEPackedDouble, Is2Addr>, TB, OpSize;
1639 // Binary Arithmetic instructions
1640 let isAsmParserOnly = 1 in {
1641 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
1642 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
1643 basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
1644 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
1645 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
1646 basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
1648 let isCommutable = 0 in {
1649 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
1650 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
1651 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
1652 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
1653 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
1654 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
1655 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
1656 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
1657 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>, VEX_4V;
1658 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
1659 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
1660 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
1664 let Constraints = "$src1 = $dst" in {
1665 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
1666 basic_sse12_fp_binop_p<0x58, "add", fadd>,
1667 basic_sse12_fp_binop_s_int<0x58, "add">;
1668 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
1669 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
1670 basic_sse12_fp_binop_s_int<0x59, "mul">;
1672 let isCommutable = 0 in {
1673 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
1674 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
1675 basic_sse12_fp_binop_s_int<0x5C, "sub">;
1676 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
1677 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
1678 basic_sse12_fp_binop_s_int<0x5E, "div">;
1679 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
1680 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
1681 basic_sse12_fp_binop_s_int<0x5F, "max">,
1682 basic_sse12_fp_binop_p_int<0x5F, "max">;
1683 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
1684 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
1685 basic_sse12_fp_binop_s_int<0x5D, "min">,
1686 basic_sse12_fp_binop_p_int<0x5D, "min">;
1691 /// In addition, we also have a special variant of the scalar form here to
1692 /// represent the associated intrinsic operation. This form is unlike the
1693 /// plain scalar form, in that it takes an entire vector (instead of a
1694 /// scalar) and leaves the top elements undefined.
1696 /// In addition, there is a special variant for the full-vector intrinsic form.
1698 /// sse1_fp_unop_s - SSE1 unops in scalar form.
1699 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
1700 SDNode OpNode, Intrinsic F32Int> {
1701 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1702 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1703 [(set FR32:$dst, (OpNode FR32:$src))]>;
1704 // For scalar unary operations, fold a load into the operation
1705 // only in OptForSize mode. Folding eliminates an instruction, but it also
1706 // eliminates the whole-register clobber that a separate load provides, so it
1707 // introduces a partial register update condition.
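// For example, "sqrtss (%rax), %xmm0" writes only the low 32 bits of %xmm0,
// leaving a false dependency on whatever previously defined %xmm0.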
1708 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
1709 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1710 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
1711 Requires<[HasSSE1, OptForSize]>;
1712 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1713 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1714 [(set VR128:$dst, (F32Int VR128:$src))]>;
1715 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1716 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1717 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1720 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
1721 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1722 SDNode OpNode, Intrinsic F32Int> {
1723 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
1724 !strconcat(!strconcat("v", OpcodeStr),
1725 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1726 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
1727 !strconcat(!strconcat("v", OpcodeStr),
1728 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1729 []>, XS, Requires<[HasAVX, OptForSize]>;
1730 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
1731 (ins VR128:$src1, VR128:$src2),
1732 !strconcat(!strconcat("v", OpcodeStr),
1733 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1734 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
1735 (ins VR128:$src1, ssmem:$src2),
1736 !strconcat(!strconcat("v", OpcodeStr),
1737 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1740 /// sse1_fp_unop_p - SSE1 unops in packed form.
1741 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1742 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1743 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1744 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
1745 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1746 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1747 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
1750 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
1751 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1752 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1753 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1754 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
1755 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1756 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1757 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
1760 /// sse1_fp_unop_p_int - SSE1 intrinsic unops in packed forms.
1761 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1762 Intrinsic V4F32Int> {
1763 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1764 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1765 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
1766 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1767 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1768 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
1772 /// sse2_fp_unop_s - SSE2 unops in scalar form.
1773 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
1774 SDNode OpNode, Intrinsic F64Int> {
1775 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1776 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1777 [(set FR64:$dst, (OpNode FR64:$src))]>;
1778 // See the comments in sse1_fp_unop_s for why this is OptForSize.
1779 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1780 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1781 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
1782 Requires<[HasSSE2, OptForSize]>;
1783 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1784 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1785 [(set VR128:$dst, (F64Int VR128:$src))]>;
1786 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1787 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1788 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1791 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
1792 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1793 SDNode OpNode, Intrinsic F64Int> {
1794 def SDr : VSDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1795 !strconcat(OpcodeStr,
1796 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1797 def SDm : VSDI<opc, MRMSrcMem, (outs FR64:$dst),
1798 (ins FR64:$src1, f64mem:$src2),
1799 !strconcat(OpcodeStr,
1800 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1801 def SDr_Int : VSDI<opc, MRMSrcReg, (outs VR128:$dst),
1802 (ins VR128:$src1, VR128:$src2),
1803 !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1805 def SDm_Int : VSDI<opc, MRMSrcMem, (outs VR128:$dst),
1806 (ins VR128:$src1, sdmem:$src2),
1807 !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1811 /// sse2_fp_unop_p - SSE2 unops in vector forms.
1812 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
1814 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1815 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1816 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
1817 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1818 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1819 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
1822 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
1823 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1824 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1825 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1826 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
1827 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1828 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1829 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
1832 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
1833 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1834 Intrinsic V2F64Int> {
1835 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1836 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1837 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
1838 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1839 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1840 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
1843 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
1845 defm VSQRT : sse1_fp_unop_s_avx<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
1846 sse2_fp_unop_s_avx<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>, VEX_4V;
1849 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
1850 sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
1851 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1852 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>, VEX;
1855 // Reciprocal approximations. Note that these typically require refinement
1856 // in order to obtain suitable precision.
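// A single Newton-Raphson step is the usual refinement: for a reciprocal
// estimate x0 of a, x1 = x0 * (2 - a*x0); for a reciprocal square-root
// estimate, x1 = x0 * (1.5 - 0.5*a*x0*x0).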
1857 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "rsqrt", X86frsqrt,
1858 int_x86_sse_rsqrt_ss>, VEX_4V;
1859 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
1860 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>, VEX;
1862 defm VRCP : sse1_fp_unop_s_avx<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>, VEX_4V;
1864 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
1865 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>, VEX;
1869 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
1870 sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
1871 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
1872 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1873 sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
1874 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
1876 // Reciprocal approximations. Note that these typically require refinement
1877 // in order to obtain suitable precision.
1878 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
1879 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
1880 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
1881 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
1882 sse1_fp_unop_p<0x53, "rcp", X86frcp>,
1883 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
1885 // There is no f64 version of the reciprocal approximation instructions.
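// The hardware only provides single-precision estimates, so double-precision
// code has to use a full-precision divide or square root instead.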
1887 //===----------------------------------------------------------------------===//
1888 // SSE 1 & 2 - Non-temporal stores
1889 //===----------------------------------------------------------------------===//
1891 let isAsmParserOnly = 1 in {
1892 def VMOVNTPSmr_Int : VPSI<0x2B, MRMDestMem, (outs),
1893 (ins i128mem:$dst, VR128:$src),
1894 "movntps\t{$src, $dst|$dst, $src}",
1895 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>, VEX;
1896 def VMOVNTPDmr_Int : VPDI<0x2B, MRMDestMem, (outs),
1897 (ins i128mem:$dst, VR128:$src),
1898 "movntpd\t{$src, $dst|$dst, $src}",
1899 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>, VEX;
1901 let ExeDomain = SSEPackedInt in
1902 def VMOVNTDQmr_Int : VPDI<0xE7, MRMDestMem, (outs),
1903 (ins f128mem:$dst, VR128:$src),
1904 "movntdq\t{$src, $dst|$dst, $src}",
1905 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>, VEX;
1907 let AddedComplexity = 400 in { // Prefer non-temporal versions
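// A larger AddedComplexity makes instruction selection try these nontemporal
// store patterns before the ordinary aligned-store patterns when both match.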
1908 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
1909 (ins f128mem:$dst, VR128:$src),
1910 "movntps\t{$src, $dst|$dst, $src}",
1911 [(alignednontemporalstore (v4f32 VR128:$src),
1913 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
1914 (ins f128mem:$dst, VR128:$src),
1915 "movntpd\t{$src, $dst|$dst, $src}",
1916 [(alignednontemporalstore (v2f64 VR128:$src),
1918 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
1919 (ins f128mem:$dst, VR128:$src),
1920 "movntdq\t{$src, $dst|$dst, $src}",
1921 [(alignednontemporalstore (v2f64 VR128:$src),
1923 let ExeDomain = SSEPackedInt in
1924 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
1925 (ins f128mem:$dst, VR128:$src),
1926 "movntdq\t{$src, $dst|$dst, $src}",
1927 [(alignednontemporalstore (v4f32 VR128:$src),
1930 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
1931 (ins f256mem:$dst, VR256:$src),
1932 "movntps\t{$src, $dst|$dst, $src}",
1933 [(alignednontemporalstore (v8f32 VR256:$src),
1935 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
1936 (ins f256mem:$dst, VR256:$src),
1937 "movntpd\t{$src, $dst|$dst, $src}",
1938 [(alignednontemporalstore (v4f64 VR256:$src),
1940 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
1941 (ins f256mem:$dst, VR256:$src),
1942 "movntdq\t{$src, $dst|$dst, $src}",
1943 [(alignednontemporalstore (v4f64 VR256:$src),
1945 let ExeDomain = SSEPackedInt in
1946 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
1947 (ins f256mem:$dst, VR256:$src),
1948 "movntdq\t{$src, $dst|$dst, $src}",
1949 [(alignednontemporalstore (v8f32 VR256:$src),
1954 def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
1955 "movntps\t{$src, $dst|$dst, $src}",
1956 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
1957 def MOVNTPDmr_Int : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
1958 "movntpd\t{$src, $dst|$dst, $src}",
1959 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
1961 let ExeDomain = SSEPackedInt in
1962 def MOVNTDQmr_Int : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
1963 "movntdq\t{$src, $dst|$dst, $src}",
1964 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
1966 let AddedComplexity = 400 in { // Prefer non-temporal versions
1967 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
1968 "movntps\t{$src, $dst|$dst, $src}",
1969 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
1970 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
1971 "movntpd\t{$src, $dst|$dst, $src}",
1972 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
1974 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
1975 "movntdq\t{$src, $dst|$dst, $src}",
1976 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
1978 let ExeDomain = SSEPackedInt in
1979 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
1980 "movntdq\t{$src, $dst|$dst, $src}",
1981 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
1983 // There is no AVX form for instructions below this point
1984 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
1985 "movnti\t{$src, $dst|$dst, $src}",
1986 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
1987 TB, Requires<[HasSSE2]>;
1989 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
1990 "movnti\t{$src, $dst|$dst, $src}",
1991 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
1992 TB, Requires<[HasSSE2]>;
1995 def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
1996 "movnti\t{$src, $dst|$dst, $src}",
1997 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
1998 TB, Requires<[HasSSE2]>;
2000 //===----------------------------------------------------------------------===//
2001 // SSE 1 & 2 - Misc Instructions (No AVX form)
2002 //===----------------------------------------------------------------------===//
2004 // Prefetch intrinsic.
2005 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2006 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
2007 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2008 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
2009 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2010 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
2011 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2012 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
2014 // Load, store, and memory fence
2015 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2016 TB, Requires<[HasSSE1]>;
2017 def : Pat<(X86SFence), (SFENCE)>;
2019 // Alias instructions that map zero vector to pxor / xorp* for sse.
2020 // We set canFoldAsLoad because this can be converted to a constant-pool
2021 // load of an all-zeros value if folding it would be beneficial.
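// When it is not folded, the zero is rematerialized as an xor of the register
// with itself, a zeroing idiom that most CPUs recognize as dependency-free.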
2022 // FIXME: Change encoding to pseudo!
2023 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2024 isCodeGenOnly = 1 in {
2025 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2026 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
2027 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2028 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
2029 let ExeDomain = SSEPackedInt in
2030 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2031 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2034 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
2035 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
2036 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
2038 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2039 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
2041 //===----------------------------------------------------------------------===//
2042 // SSE 1 & 2 - Load/Store XCSR register
2043 //===----------------------------------------------------------------------===//
2045 let isAsmParserOnly = 1 in {
2046 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2047 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2048 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2049 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2052 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2053 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2054 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2055 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2057 //===---------------------------------------------------------------------===//
2058 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2059 //===---------------------------------------------------------------------===//
2061 let ExeDomain = SSEPackedInt in { // SSE integer instructions
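// The ExeDomain tag lets the SSE execution-domain fixup pass keep these in the
// integer domain and avoid domain-crossing penalties on some CPUs.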
2063 let isAsmParserOnly = 1 in {
2064 let neverHasSideEffects = 1 in {
2065 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2066 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2067 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2068 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2070 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2071 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2072 def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2073 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2075 let canFoldAsLoad = 1, mayLoad = 1 in {
2076 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2077 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2078 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2079 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2080 let Predicates = [HasAVX] in {
2081 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2082 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2083 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2084 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2088 let mayStore = 1 in {
2089 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2090 (ins i128mem:$dst, VR128:$src),
2091 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2092 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
2093 (ins i256mem:$dst, VR256:$src),
2094 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2095 let Predicates = [HasAVX] in {
2096 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2097 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2098 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
2099 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2104 let neverHasSideEffects = 1 in
2105 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2106 "movdqa\t{$src, $dst|$dst, $src}", []>;
2108 let canFoldAsLoad = 1, mayLoad = 1 in {
2109 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2110 "movdqa\t{$src, $dst|$dst, $src}",
2111 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2112 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2113 "movdqu\t{$src, $dst|$dst, $src}",
2114 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2115 XS, Requires<[HasSSE2]>;
2118 let mayStore = 1 in {
2119 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2120 "movdqa\t{$src, $dst|$dst, $src}",
2121 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2122 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2123 "movdqu\t{$src, $dst|$dst, $src}",
2124 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2125 XS, Requires<[HasSSE2]>;
2128 // Intrinsic forms of MOVDQU load and store
2129 let isAsmParserOnly = 1 in {
2130 let canFoldAsLoad = 1 in
2131 def VMOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2132 "vmovdqu\t{$src, $dst|$dst, $src}",
2133 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2134 XS, VEX, Requires<[HasAVX]>;
2135 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2136 "vmovdqu\t{$src, $dst|$dst, $src}",
2137 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2138 XS, VEX, Requires<[HasAVX]>;
2141 let canFoldAsLoad = 1 in
2142 def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2143 "movdqu\t{$src, $dst|$dst, $src}",
2144 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2145 XS, Requires<[HasSSE2]>;
2146 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2147 "movdqu\t{$src, $dst|$dst, $src}",
2148 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2149 XS, Requires<[HasSSE2]>;
2151 } // ExeDomain = SSEPackedInt
2153 //===---------------------------------------------------------------------===//
2154 // SSE2 - Packed Integer Arithmetic Instructions
2155 //===---------------------------------------------------------------------===//
2157 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2159 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
2160 bit IsCommutable = 0, bit Is2Addr = 1> {
2161 let isCommutable = IsCommutable in
2162 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2163 (ins VR128:$src1, VR128:$src2),
2165 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2166 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2167 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2168 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2169 (ins VR128:$src1, i128mem:$src2),
2171 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2172 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2173 [(set VR128:$dst, (IntId VR128:$src1,
2174 (bitconvert (memopv2i64 addr:$src2))))]>;
2177 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
2178 string OpcodeStr, Intrinsic IntId,
2179 Intrinsic IntId2, bit Is2Addr = 1> {
2180 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2181 (ins VR128:$src1, VR128:$src2),
2183 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2184 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2185 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2186 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2187 (ins VR128:$src1, i128mem:$src2),
2189 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2190 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2191 [(set VR128:$dst, (IntId VR128:$src1,
2192 (bitconvert (memopv2i64 addr:$src2))))]>;
2193 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
2194 (ins VR128:$src1, i32i8imm:$src2),
2196 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2197 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2198 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
2201 /// PDI_binop_rm - Simple SSE2 binary operator.
2202 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2203 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
2204 let isCommutable = IsCommutable in
2205 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2206 (ins VR128:$src1, VR128:$src2),
2208 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2209 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2210 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
2211 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2212 (ins VR128:$src1, i128mem:$src2),
2214 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2215 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2216 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
2217 (bitconvert (memopv2i64 addr:$src2)))))]>;
2220 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
2222 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
2223 /// to collapse (bitconvert VT to VT) into its operand.
2225 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
2226 bit IsCommutable = 0, bit Is2Addr = 1> {
2227 let isCommutable = IsCommutable in
2228 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2229 (ins VR128:$src1, VR128:$src2),
2231 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2232 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2233 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
2234 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2235 (ins VR128:$src1, i128mem:$src2),
2237 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2238 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2239 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
2242 } // ExeDomain = SSEPackedInt
2244 // 128-bit Integer Arithmetic
2246 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2247 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
2248 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
2249 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
2250 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
2251 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
2252 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
2253 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
2254 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
2255 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
2258 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
2260 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
2262 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
2264 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
2266 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
2268 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
2270 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
2272 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
2274 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
2276 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
2278 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
2280 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
2282 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
2284 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
2286 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
2288 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
2290 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
2292 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
2294 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
2298 let Constraints = "$src1 = $dst" in {
2299 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
2300 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
2301 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
2302 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
2303 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
2304 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
2305 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
2306 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
2307 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
2310 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
2311 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
2312 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
2313 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
2314 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
2315 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
2316 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
2317 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
2318 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
2319 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
2320 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
2321 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
2322 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
2323 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
2324 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
2325 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
2326 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
2327 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
2328 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
2330 } // Constraints = "$src1 = $dst"
2332 //===---------------------------------------------------------------------===//
2333 // SSE2 - Packed Integer Logical Instructions
2334 //===---------------------------------------------------------------------===//
2336 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2337 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
2338 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
2340 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
2341 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
2343 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
2344 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
2347 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
2348 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
2350 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
2351 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
2353 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
2354 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
2357 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
2358 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
2360 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
2361 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
2364 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
2365 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
2366 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
2368 let ExeDomain = SSEPackedInt in {
2369 let neverHasSideEffects = 1 in {
2370 // 128-bit logical shifts.
2371 def VPSLLDQri : PDIi8<0x73, MRM7r,
2372 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2373 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2375 def VPSRLDQri : PDIi8<0x73, MRM3r,
2376 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2377 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2379 // PSRADQri doesn't exist in SSE[1-3].
2381 def VPANDNrr : PDI<0xDF, MRMSrcReg,
2382 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2383 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2384 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2385 VR128:$src2)))]>, VEX_4V;
2387 def VPANDNrm : PDI<0xDF, MRMSrcMem,
2388 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2389 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2390 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2391 (memopv2i64 addr:$src2))))]>,
2396 let Constraints = "$src1 = $dst" in {
2397 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
2398 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
2399 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
2400 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
2401 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
2402 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
2404 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
2405 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
2406 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
2407 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
2408 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
2409 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
2411 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
2412 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
2413 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
2414 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
2416 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
2417 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
2418 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
2420 let ExeDomain = SSEPackedInt in {
2421 let neverHasSideEffects = 1 in {
2422 // 128-bit logical shifts.
2423 def PSLLDQri : PDIi8<0x73, MRM7r,
2424 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2425 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
2426 def PSRLDQri : PDIi8<0x73, MRM3r,
2427 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2428 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
2429 // PSRADQri doesn't exist in SSE[1-3].
2431 def PANDNrr : PDI<0xDF, MRMSrcReg,
2432 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2433 "pandn\t{$src2, $dst|$dst, $src2}",
2434 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2437 def PANDNrm : PDI<0xDF, MRMSrcMem,
2438 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2439 "pandn\t{$src2, $dst|$dst, $src2}",
2440 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2441 (memopv2i64 addr:$src2))))]>;
2443 } // Constraints = "$src1 = $dst"
2445 let Predicates = [HasSSE2] in {
2446 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2447 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2448 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2449 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2450 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2451 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
2452 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2453 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
2454 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2455 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2457 // Shift up / down and insert zeros.
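// The X86vshl/X86vshr nodes here carry a bit count while pslldq/psrldq shift
// by whole bytes, so BYTE_imm rescales the immediate accordingly.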
2458 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2459 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2460 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2461 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2464 //===---------------------------------------------------------------------===//
2465 // SSE2 - Packed Integer Comparison Instructions
2466 //===---------------------------------------------------------------------===//
2468 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2469 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
2471 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
2473 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
2475 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
2477 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
2479 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
2483 let Constraints = "$src1 = $dst" in {
2484 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
2485 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
2486 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
2487 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
2488 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
2489 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
2490 } // Constraints = "$src1 = $dst"
2492 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2493 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
2494 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2495 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2496 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2497 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2498 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2499 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2500 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2501 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2502 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2503 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2505 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2506 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2507 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2508 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2509 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2510 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2511 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2512 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2513 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2514 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2515 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2516 (PCMPGTDrm VR128:$src1, addr:$src2)>;
2518 //===---------------------------------------------------------------------===//
2519 // SSE2 - Packed Integer Pack Instructions
2520 //===---------------------------------------------------------------------===//
2522 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2523 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
2525 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
2527 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
2531 let Constraints = "$src1 = $dst" in {
2532 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2533 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2534 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2535 } // Constraints = "$src1 = $dst"
2537 //===---------------------------------------------------------------------===//
2538 // SSE2 - Packed Integer Shuffle Instructions
2539 //===---------------------------------------------------------------------===//
2541 let ExeDomain = SSEPackedInt in {
2542 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
2544 def ri : Ii8<0x70, MRMSrcReg,
2545 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2546 !strconcat(OpcodeStr,
2547 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2548 [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
2550 def mi : Ii8<0x70, MRMSrcMem,
2551 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2552 !strconcat(OpcodeStr,
2553 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2554 [(set VR128:$dst, (vt (pshuf_frag:$src2
2555 (bc_frag (memopv2i64 addr:$src1)),
2558 } // ExeDomain = SSEPackedInt
2560 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2561 let AddedComplexity = 5 in
2562 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
2565 // SSE2 with ImmT == Imm8 and XS prefix.
2566 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
2569 // SSE2 with ImmT == Imm8 and XD prefix.
2570 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
2574 let Predicates = [HasSSE2] in {
2575 let AddedComplexity = 5 in
2576 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
2578 // SSE2 with ImmT == Imm8 and XS prefix.
2579 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
2581 // SSE2 with ImmT == Imm8 and XD prefix.
2582 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
2585 //===---------------------------------------------------------------------===//
2586 // SSE2 - Packed Integer Unpack Instructions
2587 //===---------------------------------------------------------------------===//
2589 let ExeDomain = SSEPackedInt in {
2590 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
2591 PatFrag unp_frag, PatFrag bc_frag, bit Is2Addr = 1> {
2592 def rr : PDI<opc, MRMSrcReg,
2593 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2595 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2596 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2597 [(set VR128:$dst, (vt (unp_frag VR128:$src1, VR128:$src2)))]>;
2598 def rm : PDI<opc, MRMSrcMem,
2599 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2601 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2602 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2603 [(set VR128:$dst, (unp_frag VR128:$src1,
2604 (bc_frag (memopv2i64
2608 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2609 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, unpckl, bc_v16i8,
2611 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, unpckl, bc_v8i16,
2613 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, unpckl, bc_v4i32,
2616 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2617 /// knew to collapse (bitconvert VT to VT) into its operand.
2618 def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2619 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2620 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2622 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>, VEX_4V;
2623 def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2624 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2625 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2627 (v2i64 (unpckl VR128:$src1,
2628 (memopv2i64 addr:$src2))))]>, VEX_4V;
2630 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, unpckh, bc_v16i8,
2632 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, unpckh, bc_v8i16,
2634 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, unpckh, bc_v4i32,
2637 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2638 /// knew to collapse (bitconvert VT to VT) into its operand.
2639 def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2640 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2641 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2643 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>, VEX_4V;
2644 def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2645 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2646 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2648 (v2i64 (unpckh VR128:$src1,
2649 (memopv2i64 addr:$src2))))]>, VEX_4V;
2652 let Constraints = "$src1 = $dst" in {
2653 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, unpckl, bc_v16i8>;
2654 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, unpckl, bc_v8i16>;
2655 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, unpckl, bc_v4i32>;
2657 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2658 /// knew to collapse (bitconvert VT to VT) into its operand.
2659 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2660 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2661 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2663 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>;
2664 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2665 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2666 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2668 (v2i64 (unpckl VR128:$src1,
2669 (memopv2i64 addr:$src2))))]>;
2671 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, unpckh, bc_v16i8>;
2672 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, unpckh, bc_v8i16>;
2673 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, unpckh, bc_v4i32>;
2675 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2676 /// knew to collapse (bitconvert VT to VT) into its operand.
2677 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2678 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2679 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2681 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>;
2682 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2683 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2684 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2686 (v2i64 (unpckh VR128:$src1,
2687 (memopv2i64 addr:$src2))))]>;
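// Note: the punpckl* forms interleave elements from the low halves of the two
// sources (l0, r0, l1, r1, ...), while punpckh* interleave the high halves.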
2690 } // ExeDomain = SSEPackedInt
2692 //===---------------------------------------------------------------------===//
2693 // SSE2 - Packed Integer Extract and Insert
2694 //===---------------------------------------------------------------------===//
2696 let ExeDomain = SSEPackedInt in {
2697 multiclass sse2_pinsrw<bit Is2Addr = 1> {
2698 def rri : Ii8<0xC4, MRMSrcReg,
2699 (outs VR128:$dst), (ins VR128:$src1,
2700 GR32:$src2, i32i8imm:$src3),
2702 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2703 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2705 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
2706 def rmi : Ii8<0xC4, MRMSrcMem,
2707 (outs VR128:$dst), (ins VR128:$src1,
2708 i16mem:$src2, i32i8imm:$src3),
2710 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2711 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2713 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2718 let isAsmParserOnly = 1, Predicates = [HasAVX] in
2719 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
2720 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2721 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2722 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2723 imm:$src2))]>, OpSize, VEX;
2724 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2725 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2726 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2727 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2731 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2732 defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
2733 def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
2734 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
2735 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2736 []>, OpSize, VEX_4V;
2739 let Constraints = "$src1 = $dst" in
2740 defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
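// Note: pextrw copies the word selected by the immediate into the low 16 bits
// of the GR32 destination (zeroing the rest), and pinsrw replaces the selected
// word lane with the low word of a GR32 or with a 16-bit load.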
2742 } // ExeDomain = SSEPackedInt
2744 //===---------------------------------------------------------------------===//
2745 // SSE2 - Packed Mask Creation
2746 //===---------------------------------------------------------------------===//
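// Note: pmovmskb gathers the most significant bit of each of the 16 source
// bytes into the low 16 bits of the GR32 destination and zeroes the rest,
// which is what the int_x86_sse2_pmovmskb_128 intrinsic models.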
2748 let ExeDomain = SSEPackedInt in {
2750 let isAsmParserOnly = 1 in {
2751 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2752 "pmovmskb\t{$src, $dst|$dst, $src}",
2753 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
2754 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2755 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
2757 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2758 "pmovmskb\t{$src, $dst|$dst, $src}",
2759 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2761 } // ExeDomain = SSEPackedInt
2763 //===---------------------------------------------------------------------===//
2764 // SSE2 - Conditional Store
2765 //===---------------------------------------------------------------------===//
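// Note: maskmovdqu performs a byte-masked store of $src to the address held
// implicitly in EDI (or RDI in 64-bit mode); only bytes whose mask byte has
// its most significant bit set are written, with a non-temporal hint.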
2767 let ExeDomain = SSEPackedInt in {
2769 let isAsmParserOnly = 1 in {
2771 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
2772 (ins VR128:$src, VR128:$mask),
2773 "maskmovdqu\t{$mask, $src|$src, $mask}",
2774 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
2776 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
2777 (ins VR128:$src, VR128:$mask),
2778 "maskmovdqu\t{$mask, $src|$src, $mask}",
2779 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
2783 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2784 "maskmovdqu\t{$mask, $src|$src, $mask}",
2785 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
2787 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2788 "maskmovdqu\t{$mask, $src|$src, $mask}",
2789 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
2791 } // ExeDomain = SSEPackedInt
2793 //===---------------------------------------------------------------------===//
2794 // SSE2 - Move Doubleword
2795 //===---------------------------------------------------------------------===//
2797 // Move Int Doubleword to Packed Double Int
2798 let isAsmParserOnly = 1 in {
2799 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2800 "movd\t{$src, $dst|$dst, $src}",
2802 (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
2803 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2804 "movd\t{$src, $dst|$dst, $src}",
2806 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
2809 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2810 "movd\t{$src, $dst|$dst, $src}",
2812 (v4i32 (scalar_to_vector GR32:$src)))]>;
2813 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2814 "movd\t{$src, $dst|$dst, $src}",
2816 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2819 // Move Int Doubleword to Single Scalar
2820 let isAsmParserOnly = 1 in {
2821 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2822 "movd\t{$src, $dst|$dst, $src}",
2823 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
2825 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2826 "movd\t{$src, $dst|$dst, $src}",
2827 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
2830 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2831 "movd\t{$src, $dst|$dst, $src}",
2832 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2834 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2835 "movd\t{$src, $dst|$dst, $src}",
2836 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2838 // Move Packed Doubleword Int to Int Doubleword
2839 let isAsmParserOnly = 1 in {
2840 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2841 "movd\t{$src, $dst|$dst, $src}",
2842 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2844 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
2845 (ins i32mem:$dst, VR128:$src),
2846 "movd\t{$src, $dst|$dst, $src}",
2847 [(store (i32 (vector_extract (v4i32 VR128:$src),
2848 (iPTR 0))), addr:$dst)]>, VEX;
2850 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2851 "movd\t{$src, $dst|$dst, $src}",
2852 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2854 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
2855 "movd\t{$src, $dst|$dst, $src}",
2856 [(store (i32 (vector_extract (v4i32 VR128:$src),
2857 (iPTR 0))), addr:$dst)]>;
2859 // Move Scalar Single to Double Int
2860 let isAsmParserOnly = 1 in {
2861 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
2862 "movd\t{$src, $dst|$dst, $src}",
2863 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
2864 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
2865 "movd\t{$src, $dst|$dst, $src}",
2866 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
2868 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
2869 "movd\t{$src, $dst|$dst, $src}",
2870 [(set GR32:$dst, (bitconvert FR32:$src))]>;
2871 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
2872 "movd\t{$src, $dst|$dst, $src}",
2873 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
2875 // movd / movq to an XMM register zero-extends the upper elements
2876 let AddedComplexity = 15, isAsmParserOnly = 1 in {
2877 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2878 "movd\t{$src, $dst|$dst, $src}",
2879 [(set VR128:$dst, (v4i32 (X86vzmovl
2880 (v4i32 (scalar_to_vector GR32:$src)))))]>,
2882 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2883 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
2884 [(set VR128:$dst, (v2i64 (X86vzmovl
2885 (v2i64 (scalar_to_vector GR64:$src)))))]>,
2888 let AddedComplexity = 15 in {
2889 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2890 "movd\t{$src, $dst|$dst, $src}",
2891 [(set VR128:$dst, (v4i32 (X86vzmovl
2892 (v4i32 (scalar_to_vector GR32:$src)))))]>;
2893 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2894 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
2895 [(set VR128:$dst, (v2i64 (X86vzmovl
2896 (v2i64 (scalar_to_vector GR64:$src)))))]>;
2899 let AddedComplexity = 20 in {
2900 let isAsmParserOnly = 1 in
2901 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2902 "movd\t{$src, $dst|$dst, $src}",
2904 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
2905 (loadi32 addr:$src))))))]>,
2907 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2908 "movd\t{$src, $dst|$dst, $src}",
2910 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
2911 (loadi32 addr:$src))))))]>;
2913 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
2914 (MOVZDI2PDIrm addr:$src)>;
2915 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
2916 (MOVZDI2PDIrm addr:$src)>;
2917 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
2918 (MOVZDI2PDIrm addr:$src)>;
2921 //===---------------------------------------------------------------------===//
2922 // SSE2 - Move Quadword
2923 //===---------------------------------------------------------------------===//
2925 // Move Quadword Int to Packed Quadword Int
2926 let isAsmParserOnly = 1 in
2927 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2928 "vmovq\t{$src, $dst|$dst, $src}",
2930 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
2931 VEX, Requires<[HasAVX]>;
2932 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2933 "movq\t{$src, $dst|$dst, $src}",
2935 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
2936 Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
2938 // Move Packed Quadword Int to Quadword Int
2939 let isAsmParserOnly = 1 in
2940 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
2941 "movq\t{$src, $dst|$dst, $src}",
2942 [(store (i64 (vector_extract (v2i64 VR128:$src),
2943 (iPTR 0))), addr:$dst)]>, VEX;
2944 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
2945 "movq\t{$src, $dst|$dst, $src}",
2946 [(store (i64 (vector_extract (v2i64 VR128:$src),
2947 (iPTR 0))), addr:$dst)]>;
2949 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
2950 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
2952 // Store / copy the lower 64 bits of an XMM register.
2953 let isAsmParserOnly = 1 in
2954 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
2955 "movq\t{$src, $dst|$dst, $src}",
2956 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
2957 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
2958 "movq\t{$src, $dst|$dst, $src}",
2959 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
2961 let AddedComplexity = 20, isAsmParserOnly = 1 in
2962 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2963 "vmovq\t{$src, $dst|$dst, $src}",
2965 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
2966 (loadi64 addr:$src))))))]>,
2967 XS, VEX, Requires<[HasAVX]>;
2969 let AddedComplexity = 20 in {
2970 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2971 "movq\t{$src, $dst|$dst, $src}",
2973 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
2974 (loadi64 addr:$src))))))]>,
2975 XS, Requires<[HasSSE2]>;
2977 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
2978 (MOVZQI2PQIrm addr:$src)>;
2979 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
2980 (MOVZQI2PQIrm addr:$src)>;
2981 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
2984 // Move from XMM to XMM, clearing the upper 64 bits. Note: the IA-32 manual is
2985 // misleading here; movq xmm1, xmm2 does clear the high bits.
2986 let isAsmParserOnly = 1, AddedComplexity = 15 in
2987 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2988 "vmovq\t{$src, $dst|$dst, $src}",
2989 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
2990 XS, VEX, Requires<[HasAVX]>;
2991 let AddedComplexity = 15 in
2992 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2993 "movq\t{$src, $dst|$dst, $src}",
2994 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
2995 XS, Requires<[HasSSE2]>;
2997 let AddedComplexity = 20, isAsmParserOnly = 1 in
2998 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2999 "vmovq\t{$src, $dst|$dst, $src}",
3000 [(set VR128:$dst, (v2i64 (X86vzmovl
3001 (loadv2i64 addr:$src))))]>,
3002 XS, VEX, Requires<[HasAVX]>;
3003 let AddedComplexity = 20 in {
3004 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3005 "movq\t{$src, $dst|$dst, $src}",
3006 [(set VR128:$dst, (v2i64 (X86vzmovl
3007 (loadv2i64 addr:$src))))]>,
3008 XS, Requires<[HasSSE2]>;
3010 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
3011 (MOVZPQILo2PQIrm addr:$src)>;
3014 // Instructions to match in the assembler
3015 let isAsmParserOnly = 1 in {
3016 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3017 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3018 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3019 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3020 // Recognize "movd" with GR64 destination, but encode as a "movq"
3021 def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3022 "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3025 // Instructions for the disassembler
3026 // xr = XMM register
3029 let isAsmParserOnly = 1, Predicates = [HasAVX] in
3030 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3031 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3032 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3033 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3035 //===---------------------------------------------------------------------===//
3036 // SSE2 - Misc Instructions
3037 //===---------------------------------------------------------------------===//
3040 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3041 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3042 TB, Requires<[HasSSE2]>;
3044 // Load, store, and memory fence
3045 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3046 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3047 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3048 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3049 def : Pat<(X86LFence), (LFENCE)>;
3050 def : Pat<(X86MFence), (MFENCE)>;
3053 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3054 // was introduced with SSE2, it's backward compatible.
3055 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
3057 // Alias instruction that maps the all-ones vector to pcmpeqd for SSE.
3058 // We set canFoldAsLoad because this can be converted to a constant-pool
3059 // load of an all-ones value if folding it would be beneficial.
3060 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3061 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3062 // FIXME: Change encoding to pseudo.
3063 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3064 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
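// Encoding 0x76 is pcmpeqd: comparing a register with itself makes every
// element compare equal, so the destination becomes all ones without a load.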
3066 //===---------------------------------------------------------------------===//
3067 // SSE3 - Conversion Instructions
3068 //===---------------------------------------------------------------------===//
3070 // Convert Packed Double FP to Packed DW Integers
3071 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3072 // The assembler can recognize rr 256-bit instructions by seeing a ymm
3073 // register, but the same isn't true when using memory operands instead.
3074 // Provide other assembly rr and rm forms to address this explicitly.
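// For example, "vcvtpd2dq (%rax), %xmm0" is ambiguous between the 128-bit and
// 256-bit memory forms, so the X/Y suffixed mnemonics below name them explicitly.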
3075 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3076 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3077 def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3078 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3081 def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3082 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3083 def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3084 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3087 def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3088 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
3089 def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
3090 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
3093 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3094 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3095 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3096 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3098 // Convert Packed DW Integers to Packed Double FP
3099 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3100 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3101 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3102 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3103 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3104 def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
3105 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3106 def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
3107 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3110 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3111 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3112 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3113 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3115 //===---------------------------------------------------------------------===//
3116 // SSE3 - Move Instructions
3117 //===---------------------------------------------------------------------===//
3119 // Replicate Single FP
3120 multiclass sse3_replicate_sfp<bits<8> op, PatFrag rep_frag, string OpcodeStr> {
3121 def rr : S3SI<op, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3122 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3123 [(set VR128:$dst, (v4f32 (rep_frag
3124 VR128:$src, (undef))))]>;
3125 def rm : S3SI<op, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3126 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3127 [(set VR128:$dst, (rep_frag
3128 (memopv4f32 addr:$src), (undef)))]>;
3131 multiclass sse3_replicate_sfp_y<bits<8> op, PatFrag rep_frag,
3133 def rr : S3SI<op, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3134 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3135 def rm : S3SI<op, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3136 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3139 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3140 // FIXME: Merge above classes when we have patterns for the ymm version
3141 defm VMOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "vmovshdup">, VEX;
3142 defm VMOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "vmovsldup">, VEX;
3143 defm VMOVSHDUPY : sse3_replicate_sfp_y<0x16, movshdup, "vmovshdup">, VEX;
3144 defm VMOVSLDUPY : sse3_replicate_sfp_y<0x12, movsldup, "vmovsldup">, VEX;
3146 defm MOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "movshdup">;
3147 defm MOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "movsldup">;
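// Note: movshdup duplicates the odd (high) element of each pair, giving
// { s1, s1, s3, s3 }, while movsldup duplicates the even (low) elements,
// giving { s0, s0, s2, s2 }.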
3149 // Replicate Double FP
3150 multiclass sse3_replicate_dfp<string OpcodeStr> {
3151 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3152 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3153 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
3154 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
3155 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3157 (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
3161 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
3162 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3163 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3165 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3166 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3170 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3171 // FIXME: Merge above classes when we have patterns for the ymm version
3172 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
3173 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
3175 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
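// Note: movddup broadcasts the low double into both halves: { s0, s0 }.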
3177 // Move Unaligned Integer
3178 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3179 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3180 "vlddqu\t{$src, $dst|$dst, $src}",
3181 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
3182 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3183 "vlddqu\t{$src, $dst|$dst, $src}", []>, VEX;
3185 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3186 "lddqu\t{$src, $dst|$dst, $src}",
3187 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
3189 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
3191 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3193 // Several Move patterns
3194 let AddedComplexity = 5 in {
3195 def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
3196 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3197 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3198 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3199 def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
3200 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3201 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
3202 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3205 // vector_shuffle v1, <undef> <1, 1, 3, 3>
3206 let AddedComplexity = 15 in
3207 def : Pat<(v4i32 (movshdup VR128:$src, (undef))),
3208 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3209 let AddedComplexity = 20 in
3210 def : Pat<(v4i32 (movshdup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3211 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
3213 // vector_shuffle v1, <undef> <0, 0, 2, 2>
3214 let AddedComplexity = 15 in
3215 def : Pat<(v4i32 (movsldup VR128:$src, (undef))),
3216 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3217 let AddedComplexity = 20 in
3218 def : Pat<(v4i32 (movsldup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3219 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
3221 //===---------------------------------------------------------------------===//
3222 // SSE3 - Arithmetic
3223 //===---------------------------------------------------------------------===//
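// Note: addsubps/addsubpd subtract in the even lanes and add in the odd lanes,
// i.e. result[2i] = a[2i] - b[2i] and result[2i+1] = a[2i+1] + b[2i+1].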
3225 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
3226 X86MemOperand x86memop, bit Is2Addr = 1> {
3227 def rr : I<0xD0, MRMSrcReg,
3228 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3230 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3231 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3232 [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
3233 def rm : I<0xD0, MRMSrcMem,
3234 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3236 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3237 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3238 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
3241 let isAsmParserOnly = 1, Predicates = [HasAVX],
3242 ExeDomain = SSEPackedDouble in {
3243 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
3244 f128mem, 0>, XD, VEX_4V;
3245 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
3246 f128mem, 0>, OpSize, VEX_4V;
3247 let Pattern = []<dag> in {
3248 defm VADDSUBPSY : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR256,
3249 f256mem, 0>, XD, VEX_4V;
3250 defm VADDSUBPDY : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR256,
3251 f256mem, 0>, OpSize, VEX_4V;
3254 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
3255 ExeDomain = SSEPackedDouble in {
3256 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
3258 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
3259 f128mem>, TB, OpSize;
3262 //===---------------------------------------------------------------------===//
3263 // SSE3 - Horizontal Add / Sub Instructions
3264 //===---------------------------------------------------------------------===//
3267 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3268 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3269 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3271 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3272 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3273 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3275 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3277 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3278 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3279 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3281 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3282 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3283 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3285 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3286 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3287 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3289 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3291 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3292 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3293 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3296 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3297 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
3298 int_x86_sse3_hadd_ps, 0>, VEX_4V;
3299 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
3300 int_x86_sse3_hadd_pd, 0>, VEX_4V;
3301 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
3302 int_x86_sse3_hsub_ps, 0>, VEX_4V;
3303 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
3304 int_x86_sse3_hsub_pd, 0>, VEX_4V;
3305 let Pattern = []<dag> in {
3306 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
3307 int_x86_sse3_hadd_ps, 0>, VEX_4V;
3308 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
3309 int_x86_sse3_hadd_pd, 0>, VEX_4V;
3310 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
3311 int_x86_sse3_hsub_ps, 0>, VEX_4V;
3312 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
3313 int_x86_sse3_hsub_pd, 0>, VEX_4V;
3317 let Constraints = "$src1 = $dst" in {
3318 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
3319 int_x86_sse3_hadd_ps>;
3320 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
3321 int_x86_sse3_hadd_pd>;
3322 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
3323 int_x86_sse3_hsub_ps>;
3324 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
3325 int_x86_sse3_hsub_pd>;
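// Note: the horizontal forms pair adjacent elements within each source, e.g.
// haddps gives { a0+a1, a2+a3, b0+b1, b2+b3 } and hsubps the corresponding
// differences.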
3328 //===---------------------------------------------------------------------===//
3329 // SSSE3 - Packed Absolute Instructions
3330 //===---------------------------------------------------------------------===//
3332 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
3333 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
3334 PatFrag mem_frag64, PatFrag mem_frag128,
3335 Intrinsic IntId64, Intrinsic IntId128> {
3336 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
3337 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3338 [(set VR64:$dst, (IntId64 VR64:$src))]>;
3340 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
3341 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3343 (IntId64 (bitconvert (mem_frag64 addr:$src))))]>;
3345 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3347 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3348 [(set VR128:$dst, (IntId128 VR128:$src))]>,
3351 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3353 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3356 (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
3359 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3360 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv8i8, memopv16i8,
3361 int_x86_ssse3_pabs_b,
3362 int_x86_ssse3_pabs_b_128>, VEX;
3363 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv4i16, memopv8i16,
3364 int_x86_ssse3_pabs_w,
3365 int_x86_ssse3_pabs_w_128>, VEX;
3366 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv2i32, memopv4i32,
3367 int_x86_ssse3_pabs_d,
3368 int_x86_ssse3_pabs_d_128>, VEX;
3371 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv8i8, memopv16i8,
3372 int_x86_ssse3_pabs_b,
3373 int_x86_ssse3_pabs_b_128>;
3374 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv4i16, memopv8i16,
3375 int_x86_ssse3_pabs_w,
3376 int_x86_ssse3_pabs_w_128>;
3377 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv2i32, memopv4i32,
3378 int_x86_ssse3_pabs_d,
3379 int_x86_ssse3_pabs_d_128>;
3381 //===---------------------------------------------------------------------===//
3382 // SSSE3 - Packed Binary Operator Instructions
3383 //===---------------------------------------------------------------------===//
3385 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
3386 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
3387 PatFrag mem_frag64, PatFrag mem_frag128,
3388 Intrinsic IntId64, Intrinsic IntId128,
3390 let isCommutable = 1 in
3391 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
3392 (ins VR64:$src1, VR64:$src2),
3394 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3395 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3396 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]>;
3397 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
3398 (ins VR64:$src1, i64mem:$src2),
3400 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3401 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3403 (IntId64 VR64:$src1,
3404 (bitconvert (memopv8i8 addr:$src2))))]>;
3406 let isCommutable = 1 in
3407 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3408 (ins VR128:$src1, VR128:$src2),
3410 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3411 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3412 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3414 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3415 (ins VR128:$src1, i128mem:$src2),
3417 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3418 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3420 (IntId128 VR128:$src1,
3421 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
3424 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3425 let isCommutable = 0 in {
3426 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv4i16, memopv8i16,
3427 int_x86_ssse3_phadd_w,
3428 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
3429 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv2i32, memopv4i32,
3430 int_x86_ssse3_phadd_d,
3431 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
3432 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv4i16, memopv8i16,
3433 int_x86_ssse3_phadd_sw,
3434 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
3435 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv4i16, memopv8i16,
3436 int_x86_ssse3_phsub_w,
3437 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
3438 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv2i32, memopv4i32,
3439 int_x86_ssse3_phsub_d,
3440 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
3441 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv4i16, memopv8i16,
3442 int_x86_ssse3_phsub_sw,
3443 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
3444 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv8i8, memopv16i8,
3445 int_x86_ssse3_pmadd_ub_sw,
3446 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
3447 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv8i8, memopv16i8,
3448 int_x86_ssse3_pshuf_b,
3449 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
3450 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv8i8, memopv16i8,
3451 int_x86_ssse3_psign_b,
3452 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
3453 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv4i16, memopv8i16,
3454 int_x86_ssse3_psign_w,
3455 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
3456 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv2i32, memopv4i32,
3457 int_x86_ssse3_psign_d,
3458 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
3460 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv4i16, memopv8i16,
3461 int_x86_ssse3_pmul_hr_sw,
3462 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
3465 // None of these have i8 immediate fields.
3466 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
3467 let isCommutable = 0 in {
3468 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv4i16, memopv8i16,
3469 int_x86_ssse3_phadd_w,
3470 int_x86_ssse3_phadd_w_128>;
3471 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv2i32, memopv4i32,
3472 int_x86_ssse3_phadd_d,
3473 int_x86_ssse3_phadd_d_128>;
3474 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv4i16, memopv8i16,
3475 int_x86_ssse3_phadd_sw,
3476 int_x86_ssse3_phadd_sw_128>;
3477 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv4i16, memopv8i16,
3478 int_x86_ssse3_phsub_w,
3479 int_x86_ssse3_phsub_w_128>;
3480 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv2i32, memopv4i32,
3481 int_x86_ssse3_phsub_d,
3482 int_x86_ssse3_phsub_d_128>;
3483 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv4i16, memopv8i16,
3484 int_x86_ssse3_phsub_sw,
3485 int_x86_ssse3_phsub_sw_128>;
3486 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv8i8, memopv16i8,
3487 int_x86_ssse3_pmadd_ub_sw,
3488 int_x86_ssse3_pmadd_ub_sw_128>;
3489 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv8i8, memopv16i8,
3490 int_x86_ssse3_pshuf_b,
3491 int_x86_ssse3_pshuf_b_128>;
3492 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv8i8, memopv16i8,
3493 int_x86_ssse3_psign_b,
3494 int_x86_ssse3_psign_b_128>;
3495 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv4i16, memopv8i16,
3496 int_x86_ssse3_psign_w,
3497 int_x86_ssse3_psign_w_128>;
3498 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv2i32, memopv4i32,
3499 int_x86_ssse3_psign_d,
3500 int_x86_ssse3_psign_d_128>;
3502 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv4i16, memopv8i16,
3503 int_x86_ssse3_pmul_hr_sw,
3504 int_x86_ssse3_pmul_hr_sw_128>;
3507 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
3508 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
3509 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
3510 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
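// Note: for pshufb, each destination byte is the source byte indexed by the low
// four bits of the corresponding mask byte, or zero when the mask byte has its
// most significant bit set.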
3512 //===---------------------------------------------------------------------===//
3513 // SSSE3 - Packed Align Instruction Patterns
3514 //===---------------------------------------------------------------------===//
3516 multiclass sse3_palign<string asm, bit Is2Addr = 1> {
3517 def R64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
3518 (ins VR64:$src1, VR64:$src2, i8imm:$src3),
3520 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3522 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3524 def R64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
3525 (ins VR64:$src1, i64mem:$src2, i8imm:$src3),
3527 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3529 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3532 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
3533 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
3535 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3537 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3539 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
3540 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
3542 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3544 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3548 let isAsmParserOnly = 1, Predicates = [HasAVX] in
3549 defm VPALIGN : sse3_palign<"vpalignr", 0>, VEX_4V;
3550 let Constraints = "$src1 = $dst" in
3551 defm PALIGN : sse3_palign<"palignr">;
3553 let AddedComplexity = 5 in {
3555 def : Pat<(v1i64 (palign:$src3 VR64:$src1, VR64:$src2)),
3556 (PALIGNR64rr VR64:$src2, VR64:$src1,
3557 (SHUFFLE_get_palign_imm VR64:$src3))>,
3558 Requires<[HasSSSE3]>;
3559 def : Pat<(v2i32 (palign:$src3 VR64:$src1, VR64:$src2)),
3560 (PALIGNR64rr VR64:$src2, VR64:$src1,
3561 (SHUFFLE_get_palign_imm VR64:$src3))>,
3562 Requires<[HasSSSE3]>;
3563 def : Pat<(v4i16 (palign:$src3 VR64:$src1, VR64:$src2)),
3564 (PALIGNR64rr VR64:$src2, VR64:$src1,
3565 (SHUFFLE_get_palign_imm VR64:$src3))>,
3566 Requires<[HasSSSE3]>;
3567 def : Pat<(v8i8 (palign:$src3 VR64:$src1, VR64:$src2)),
3568 (PALIGNR64rr VR64:$src2, VR64:$src1,
3569 (SHUFFLE_get_palign_imm VR64:$src3))>,
3570 Requires<[HasSSSE3]>;
3572 def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
3573 (PALIGNR128rr VR128:$src2, VR128:$src1,
3574 (SHUFFLE_get_palign_imm VR128:$src3))>,
3575 Requires<[HasSSSE3]>;
3576 def : Pat<(v4f32 (palign:$src3 VR128:$src1, VR128:$src2)),
3577 (PALIGNR128rr VR128:$src2, VR128:$src1,
3578 (SHUFFLE_get_palign_imm VR128:$src3))>,
3579 Requires<[HasSSSE3]>;
3580 def : Pat<(v8i16 (palign:$src3 VR128:$src1, VR128:$src2)),
3581 (PALIGNR128rr VR128:$src2, VR128:$src1,
3582 (SHUFFLE_get_palign_imm VR128:$src3))>,
3583 Requires<[HasSSSE3]>;
3584 def : Pat<(v16i8 (palign:$src3 VR128:$src1, VR128:$src2)),
3585 (PALIGNR128rr VR128:$src2, VR128:$src1,
3586 (SHUFFLE_get_palign_imm VR128:$src3))>,
3587 Requires<[HasSSSE3]>;
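// Note: palignr concatenates its two register operands and shifts the pair
// right by the byte immediate, so the patterns above pass the operands to
// PALIGNR*rr in swapped order relative to the palign shuffle node.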
3590 //===---------------------------------------------------------------------===//
3591 // SSE3 Misc Instructions
3592 //===---------------------------------------------------------------------===//
3594 // Thread synchronization
3595 def MONITOR : I<0x01, MRM_C8, (outs), (ins), "monitor",
3596 [(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
3597 def MWAIT : I<0x01, MRM_C9, (outs), (ins), "mwait",
3598 [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
3600 //===---------------------------------------------------------------------===//
3601 // Non-Instruction Patterns
3602 //===---------------------------------------------------------------------===//
3604 // extload f32 -> f64. This matches load+fextend because we have a hack in
3605 // the isel (PreprocessForFPConvert) that can introduce loads after dag
3606 // combine.
3607 // Since these loads aren't folded into the fextend, we have to match it
3608 // explicitly here.
3609 let Predicates = [HasSSE2] in
3610 def : Pat<(fextend (loadf32 addr:$src)),
3611 (CVTSS2SDrm addr:$src)>;
3614 let Predicates = [HasSSE2] in {
3615 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
3616 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
3617 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
3618 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
3619 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
3620 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
3621 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
3622 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
3623 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
3624 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
3625 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
3626 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
3627 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
3628 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
3629 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
3630 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
3631 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
3632 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
3633 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
3634 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
3635 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
3636 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
3637 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
3638 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
3639 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
3640 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
3641 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
3642 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
3643 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
3644 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
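// Note: the bitconvert patterns above say that casts between 128-bit vector
// types are free; they only reinterpret the XMM register, so no instruction is
// selected for them.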
3647 // Move scalar to XMM, zero-extended
3648 // (a movd to an XMM register zero-extends the upper elements)
3649 let AddedComplexity = 15 in {
3650 // Zero a VR128, then do a MOVS{S|D} to the lower bits.
3651 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
3652 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
3653 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
3654 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
3655 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
3656 (MOVSSrr (v4f32 (V_SET0PS)),
3657 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
3658 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
3659 (MOVSSrr (v4i32 (V_SET0PI)),
3660 (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
3663 // Splat v2f64 / v2i64
3664 let AddedComplexity = 10 in {
3665 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
3666 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3667 def : Pat<(unpckh (v2f64 VR128:$src), (undef)),
3668 (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3669 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
3670 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3671 def : Pat<(unpckh (v2i64 VR128:$src), (undef)),
3672 (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3675 // Special unary SHUFPSrri case.
3676 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
3677 (SHUFPSrri VR128:$src1, VR128:$src1,
3678 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3679 let AddedComplexity = 5 in
3680 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3681 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3682 Requires<[HasSSE2]>;
3683 // Special unary SHUFPDrri case (v2i64).
3684 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
3685 (SHUFPDrri VR128:$src1, VR128:$src1,
3686 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3687 Requires<[HasSSE2]>;
3688 // Special unary SHUFPDrri case (v2f64).
3689 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
3690 (SHUFPDrri VR128:$src1, VR128:$src1,
3691 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3692 Requires<[HasSSE2]>;
3693 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3694 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3695 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3696 Requires<[HasSSE2]>;
3698 // Special binary v4i32 shuffle cases with SHUFPS.
3699 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
3700 (SHUFPSrri VR128:$src1, VR128:$src2,
3701 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3702 Requires<[HasSSE2]>;
3703 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)))),
3704 (SHUFPSrmi VR128:$src1, addr:$src2,
3705 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3706 Requires<[HasSSE2]>;
3707 // Special binary v2i64 shuffle cases using SHUFPDrri.
3708 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
3709 (SHUFPDrri VR128:$src1, VR128:$src2,
3710 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3711 Requires<[HasSSE2]>;
3713 // vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
3714 let AddedComplexity = 15 in {
3715 def : Pat<(v4i32 (unpckl_undef:$src2 VR128:$src, (undef))),
3716 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3717 Requires<[OptForSpeed, HasSSE2]>;
3718 def : Pat<(v4f32 (unpckl_undef:$src2 VR128:$src, (undef))),
3719 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3720 Requires<[OptForSpeed, HasSSE2]>;
3722 let AddedComplexity = 10 in {
3723 def : Pat<(v4f32 (unpckl_undef VR128:$src, (undef))),
3724 (UNPCKLPSrr VR128:$src, VR128:$src)>;
3725 def : Pat<(v16i8 (unpckl_undef VR128:$src, (undef))),
3726 (PUNPCKLBWrr VR128:$src, VR128:$src)>;
3727 def : Pat<(v8i16 (unpckl_undef VR128:$src, (undef))),
3728 (PUNPCKLWDrr VR128:$src, VR128:$src)>;
3729 def : Pat<(v4i32 (unpckl_undef VR128:$src, (undef))),
3730 (PUNPCKLDQrr VR128:$src, VR128:$src)>;
3733 // vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
3734 let AddedComplexity = 15 in {
3735 def : Pat<(v4i32 (unpckh_undef:$src2 VR128:$src, (undef))),
3736 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3737 Requires<[OptForSpeed, HasSSE2]>;
3738 def : Pat<(v4f32 (unpckh_undef:$src2 VR128:$src, (undef))),
3739 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3740 Requires<[OptForSpeed, HasSSE2]>;
3742 let AddedComplexity = 10 in {
3743 def : Pat<(v4f32 (unpckh_undef VR128:$src, (undef))),
3744 (UNPCKHPSrr VR128:$src, VR128:$src)>;
3745 def : Pat<(v16i8 (unpckh_undef VR128:$src, (undef))),
3746 (PUNPCKHBWrr VR128:$src, VR128:$src)>;
3747 def : Pat<(v8i16 (unpckh_undef VR128:$src, (undef))),
3748 (PUNPCKHWDrr VR128:$src, VR128:$src)>;
3749 def : Pat<(v4i32 (unpckh_undef VR128:$src, (undef))),
3750 (PUNPCKHDQrr VR128:$src, VR128:$src)>;
3753 let AddedComplexity = 20 in {
3754 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
3755 def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
3756 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
3758 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
3759 def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
3760 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
3762 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
3763 def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
3764 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3765 def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
3766 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3769 let AddedComplexity = 20 in {
3770 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
3771 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
3772 (MOVLPSrm VR128:$src1, addr:$src2)>;
3773 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
3774 (MOVLPDrm VR128:$src1, addr:$src2)>;
3775 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
3776 (MOVLPSrm VR128:$src1, addr:$src2)>;
3777 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
3778 (MOVLPDrm VR128:$src1, addr:$src2)>;
3781 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
3782 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3783 (MOVLPSmr addr:$src1, VR128:$src2)>;
3784 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3785 (MOVLPDmr addr:$src1, VR128:$src2)>;
3786 def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
3788 (MOVLPSmr addr:$src1, VR128:$src2)>;
3789 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3790 (MOVLPDmr addr:$src1, VR128:$src2)>;
3792 let AddedComplexity = 15 in {
3793 // Setting the lowest element in the vector.
3794 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
3795 (MOVSSrr (v4i32 VR128:$src1),
3796 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
3797 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
3798 (MOVSDrr (v2i64 VR128:$src1),
3799 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
3801 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
3802 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
3803 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3804 Requires<[HasSSE2]>;
3805 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
3806 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3807 Requires<[HasSSE2]>;
3810 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
3811 // fall back to this for SSE1)
3812 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
3813 (SHUFPSrri VR128:$src2, VR128:$src1,
3814 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3816 // Set lowest element and zero upper elements.
3817 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
3818 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
3820 // Some special case pandn patterns.
3821 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3823 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3824 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3826 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3827 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3829 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3831 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3832 (memop addr:$src2))),
3833 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3834 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3835 (memop addr:$src2))),
3836 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3837 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3838 (memop addr:$src2))),
3839 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
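// Note: pandn computes (~$src1) & $src2, so (and (xor x, all-ones), y) maps
// directly onto it; the variants above only differ in the vector type the
// all-ones immediate is bitcast from.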
3841 // vector -> vector casts
3842 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
3843 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
3844 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
3845 (Int_CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
3846 def : Pat<(v2f64 (sint_to_fp (v2i32 VR64:$src))),
3847 (Int_CVTPI2PDrr VR64:$src)>, Requires<[HasSSE2]>;
3848 def : Pat<(v2i32 (fp_to_sint (v2f64 VR128:$src))),
3849 (Int_CVTTPD2PIrr VR128:$src)>, Requires<[HasSSE2]>;
3851 // Use movaps / movups for SSE integer load / store (one byte shorter).
3852 def : Pat<(alignedloadv4i32 addr:$src),
3853 (MOVAPSrm addr:$src)>;
3854 def : Pat<(loadv4i32 addr:$src),
3855 (MOVUPSrm addr:$src)>;
3856 def : Pat<(alignedloadv2i64 addr:$src),
3857 (MOVAPSrm addr:$src)>;
3858 def : Pat<(loadv2i64 addr:$src),
3859 (MOVUPSrm addr:$src)>;
3861 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3862 (MOVAPSmr addr:$dst, VR128:$src)>;
3863 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3864 (MOVAPSmr addr:$dst, VR128:$src)>;
3865 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3866 (MOVAPSmr addr:$dst, VR128:$src)>;
3867 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3868 (MOVAPSmr addr:$dst, VR128:$src)>;
3869 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3870 (MOVUPSmr addr:$dst, VR128:$src)>;
3871 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3872 (MOVUPSmr addr:$dst, VR128:$src)>;
3873 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3874 (MOVUPSmr addr:$dst, VR128:$src)>;
3875 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3876 (MOVUPSmr addr:$dst, VR128:$src)>;
3878 //===----------------------------------------------------------------------===//
3879 // SSE4.1 - Packed Move with Sign/Zero Extend
3880 //===----------------------------------------------------------------------===//
3882 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3883 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3884 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3885 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3887 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3888 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3890 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
3894 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3895 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
3897 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
3899 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
3901 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
3903 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
3905 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
3909 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
3910 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
3911 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
3912 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
3913 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
3914 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
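// Rough semantics of the extensions matched above (summarized from the
// SSE4.1 documentation, shown here only as an illustration):
//   pmovsxbw: low 8 x i8  -> 8 x i16, sign-extended  (pmovzxbw: zero-extended)
//   pmovsxwd: low 4 x i16 -> 4 x i32                 (pmovzxwd: zero-extended)
//   pmovsxdq: low 2 x i32 -> 2 x i64                 (pmovzxdq: zero-extended)
// Only the low 64 bits of the source participate, which is why the memory
// forms above fold a 64-bit scalar load.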
3916 // Common patterns involving scalar load.
3917 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
3918 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
3919 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
3920 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
3922 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
3923 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
3924 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
3925 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
3927 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
3928 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
3929 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
3930 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
3932 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
3933 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
3934 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
3935 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
3937 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
3938 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
3939 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
3940 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
3942 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
3943 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
3944 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
3945 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
3948 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3949 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3950 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3951 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3953 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3954 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3956 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
3960 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3961 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
3963 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
3965 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
3967 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
3971 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
3972 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
3973 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
3974 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
3976 // Common patterns involving scalar load
3977 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
3978 (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
3979 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
3980 (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
3982 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
3983 (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
3984 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
3985 (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
3988 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3989 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3990 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3991 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3993 // Expecting an i16 load any-extended to an i32 value.
3994 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
3995 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3996 [(set VR128:$dst, (IntId (bitconvert
3997 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
4001 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4002 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
4004 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
4007 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
4008 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
4010 // Common patterns involving scalar load
4011 def : Pat<(int_x86_sse41_pmovsxbq
4012 (bitconvert (v4i32 (X86vzmovl
4013 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4014 (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
4016 def : Pat<(int_x86_sse41_pmovzxbq
4017 (bitconvert (v4i32 (X86vzmovl
4018 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4019 (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
4021 //===----------------------------------------------------------------------===//
4022 // SSE4.1 - Extract Instructions
4023 //===----------------------------------------------------------------------===//
4025 /// SS41I_extract8 - SSE 4.1 extract 8 bits to a 32-bit reg or 8-bit mem
4026 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
4027 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4028 (ins VR128:$src1, i32i8imm:$src2),
4029 !strconcat(OpcodeStr,
4030 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4031 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
4033 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4034 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
4035 !strconcat(OpcodeStr,
4036 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4039 // There's an AssertZext in the way of writing the store pattern
4040 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4043 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4044 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
4045 def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
4046 (ins VR128:$src1, i32i8imm:$src2),
4047 "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
4050 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
4053 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
4054 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
4055 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4056 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
4057 !strconcat(OpcodeStr,
4058 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4061 // There's an AssertZext in the way of writing the store pattern
4062 // (store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))), addr:$dst)
4065 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4066 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
4068 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
4071 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4072 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
4073 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4074 (ins VR128:$src1, i32i8imm:$src2),
4075 !strconcat(OpcodeStr,
4076 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4078 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
4079 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4080 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
4081 !strconcat(OpcodeStr,
4082 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4083 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
4084 addr:$dst)]>, OpSize;
4087 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4088 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
4090 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
4092 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
4093 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
4094 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
4095 (ins VR128:$src1, i32i8imm:$src2),
4096 !strconcat(OpcodeStr,
4097 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4099 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
4100 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4101 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
4102 !strconcat(OpcodeStr,
4103 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4104 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
4105 addr:$dst)]>, OpSize, REX_W;
4108 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4109 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
4111 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
4113 /// SS41I_extractf32 - SSE 4.1 extract a 32-bit fp value to an int reg or memory
4115 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
4116 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4117 (ins VR128:$src1, i32i8imm:$src2),
4118 !strconcat(OpcodeStr,
4119 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4121 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
4123 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4124 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
4125 !strconcat(OpcodeStr,
4126 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4127 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
4128 addr:$dst)]>, OpSize;
4131 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4132 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
4133 def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
4134 (ins VR128:$src1, i32i8imm:$src2),
4135 "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
4138 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
4140 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
4141 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
4144 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
4145 Requires<[HasSSE41]>;
4147 //===----------------------------------------------------------------------===//
4148 // SSE4.1 - Insert Instructions
4149 //===----------------------------------------------------------------------===//
4151 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
4152 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4153 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4155 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4157 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4159 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
4160 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4161 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
4163 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4165 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4167 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
4168 imm:$src3))]>, OpSize;
4171 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4172 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
4173 let Constraints = "$src1 = $dst" in
4174 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
4176 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
4177 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4178 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4180 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4182 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4184 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
4186 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4187 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
4189 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4191 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4193 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
4194 imm:$src3)))]>, OpSize;
4197 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4198 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
4199 let Constraints = "$src1 = $dst" in
4200 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
4202 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
4203 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4204 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4206 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4208 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4210 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
4212 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4213 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
4215 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4217 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4219 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
4220 imm:$src3)))]>, OpSize;
4223 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4224 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
4225 let Constraints = "$src1 = $dst" in
4226 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
4228 // insertps has a few different modes. The first two below are optimized
4229 // inserts that won't zero arbitrary elements in the destination vector;
4230 // the next one matches the intrinsic and may zero arbitrary elements in
4231 // the target vector.
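// As a reminder (from the SSE4.1 documentation, not something expressed in
// these patterns): the insertps immediate is interpreted as
//   imm[7:6] = COUNT_S  (source element, register form only)
//   imm[5:4] = COUNT_D  (destination element to overwrite)
//   imm[3:0] = ZMASK    (destination elements to clear to zero)
// The zmask bits are what allow the intrinsic form to zero arbitrary
// elements in the target vector.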
4232 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
4233 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4234 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4236 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4238 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4240 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
4242 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4243 (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
4245 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4247 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4249 (X86insrtps VR128:$src1,
4250 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
4251 imm:$src3))]>, OpSize;
4254 let Constraints = "$src1 = $dst" in
4255 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
4256 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4257 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
4259 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4260 (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>;
4262 //===----------------------------------------------------------------------===//
4263 // SSE4.1 - Round Instructions
4264 //===----------------------------------------------------------------------===//
4266 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd,
4269 Intrinsic V2F64Int> {
4270 // Intrinsic operation, reg.
4271 // Vector intrinsic operation, reg
4272 def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
4273 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4274 !strconcat(OpcodeStr,
4275 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4276 [(set VR128:$dst, (V4F32Int VR128:$src1, imm:$src2))]>,
4279 // Vector intrinsic operation, mem
4280 def PSm_Int : Ii8<opcps, MRMSrcMem,
4281 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
4282 !strconcat(OpcodeStr,
4283 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4285 (V4F32Int (memopv4f32 addr:$src1),imm:$src2))]>,
4287 Requires<[HasSSE41]>;
4289 // Vector intrinsic operation, reg
4290 def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
4291 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4292 !strconcat(OpcodeStr,
4293 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4294 [(set VR128:$dst, (V2F64Int VR128:$src1, imm:$src2))]>,
4297 // Vector intrinsic operation, mem
4298 def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
4299 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
4300 !strconcat(OpcodeStr,
4301 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4303 (V2F64Int (memopv2f64 addr:$src1),imm:$src2))]>,
4307 multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
4308 RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
4309 // Intrinsic operation, reg.
4310 // Vector intrinsic operation, reg
4311 def PSr : SS4AIi8<opcps, MRMSrcReg,
4312 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4313 !strconcat(OpcodeStr,
4314 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4317 // Vector intrinsic operation, mem
4318 def PSm : Ii8<opcps, MRMSrcMem,
4319 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4320 !strconcat(OpcodeStr,
4321 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4322 []>, TA, OpSize, Requires<[HasSSE41]>;
4324 // Vector intrinsic operation, reg
4325 def PDr : SS4AIi8<opcpd, MRMSrcReg,
4326 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4327 !strconcat(OpcodeStr,
4328 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4331 // Vector intrinsic operation, mem
4332 def PDm : SS4AIi8<opcpd, MRMSrcMem,
4333 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4334 !strconcat(OpcodeStr,
4335 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4339 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4342 Intrinsic F64Int, bit Is2Addr = 1> {
4343 // Intrinsic operation, reg.
4344 def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
4345 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4347 !strconcat(OpcodeStr,
4348 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4349 !strconcat(OpcodeStr,
4350 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4351 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4354 // Intrinsic operation, mem.
4355 def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
4356 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4358 !strconcat(OpcodeStr,
4359 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4360 !strconcat(OpcodeStr,
4361 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4363 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
4366 // Intrinsic operation, reg.
4367 def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
4368 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4370 !strconcat(OpcodeStr,
4371 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4372 !strconcat(OpcodeStr,
4373 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4374 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4377 // Intrinsic operation, mem.
4378 def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
4379 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4381 !strconcat(OpcodeStr,
4382 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4383 !strconcat(OpcodeStr,
4384 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4386 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
4390 multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
4392 // Intrinsic operation, reg.
4393 def SSr : SS4AIi8<opcss, MRMSrcReg,
4394 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4395 !strconcat(OpcodeStr,
4396 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4399 // Intrinsic operation, mem.
4400 def SSm : SS4AIi8<opcss, MRMSrcMem,
4401 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4402 !strconcat(OpcodeStr,
4403 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4406 // Intrinsic operation, reg.
4407 def SDr : SS4AIi8<opcsd, MRMSrcReg,
4408 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4409 !strconcat(OpcodeStr,
4410 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4413 // Intrinsic operation, mem.
4414 def SDm : SS4AIi8<opcsd, MRMSrcMem,
4415 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4416 !strconcat(OpcodeStr,
4417 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4421 // FP round - roundss, roundps, roundsd, roundpd
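// The round immediate (summarized from the SSE4.1 documentation, for
// reference only) is interpreted as:
//   imm[1:0] = rounding mode: 00 nearest-even, 01 down, 10 up, 11 truncate
//   imm[2]   = if set, ignore imm[1:0] and use the current MXCSR.RC mode
//   imm[3]   = if set, suppress the precision (inexact) exception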
4422 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4424 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround",
4425 int_x86_sse41_round_ps, int_x86_sse41_round_pd>,
4427 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
4428 int_x86_sse41_round_ss, int_x86_sse41_round_sd,
4430 // Instructions for the assembler
4431 defm VROUND : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
4433 defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
4435 defm VROUND : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
4438 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round",
4439 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
4440 let Constraints = "$src1 = $dst" in
4441 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
4442 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
4444 //===----------------------------------------------------------------------===//
4445 // SSE4.1 - Packed Bit Test
4446 //===----------------------------------------------------------------------===//
4448 // ptest instruction - we'll lower to this in X86ISelLowering, primarily
4449 // from the Intel intrinsic that corresponds to it.
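// For reference (behaviour from the SSE4.1 spec, not expressed in the
// patterns themselves): ptest computes
//   ZF = ((src1 & src2) == 0)     and     CF = ((~src1 & src2) == 0)
// which is why the lowering can map _mm_testz_si128 / _mm_testc_si128 style
// intrinsics onto a ptest followed by a flag read.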
4450 let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in {
4451 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4452 "vptest\t{$src2, $src1|$src1, $src2}",
4453 [(set EFLAGS, (X86ptest VR128:$src1, VR128:$src2))]>,
4455 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
4456 "vptest\t{$src2, $src1|$src1, $src2}", []>, OpSize, VEX;
4458 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
4459 "vptest\t{$src2, $src1|$src1, $src2}",
4460 [(set EFLAGS, (X86ptest VR128:$src1, (load addr:$src2)))]>,
4462 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
4463 "vptest\t{$src2, $src1|$src1, $src2}", []>, OpSize, VEX;
4466 let Defs = [EFLAGS] in {
4467 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4468 "ptest \t{$src2, $src1|$src1, $src2}",
4469 [(set EFLAGS, (X86ptest VR128:$src1, VR128:$src2))]>,
4471 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
4472 "ptest \t{$src2, $src1|$src1, $src2}",
4473 [(set EFLAGS, (X86ptest VR128:$src1, (load addr:$src2)))]>,
4477 // The bit test instructions below are AVX only
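// vtestps/vtestpd behave like ptest restricted to the sign bits of each
// packed element (illustrative summary of the AVX documentation, not a
// pattern):
//   ZF = ((sign_bits(src1) & sign_bits(src2)) == 0)
//   CF = ((~sign_bits(src1) & sign_bits(src2)) == 0)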
4478 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
4479 X86MemOperand x86memop> {
4480 def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
4481 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4483 def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
4484 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4488 let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in {
4489 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem>;
4490 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem>;
4491 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem>;
4492 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem>;
4495 //===----------------------------------------------------------------------===//
4496 // SSE4.1 - Misc Instructions
4497 //===----------------------------------------------------------------------===//
4499 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
4500 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
4501 Intrinsic IntId128> {
4502 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4504 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4505 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
4506 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4508 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4511 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
4514 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4515 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
4516 int_x86_sse41_phminposuw>, VEX;
4517 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
4518 int_x86_sse41_phminposuw>;
4520 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
4521 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
4522 Intrinsic IntId128, bit Is2Addr = 1> {
4523 let isCommutable = 1 in
4524 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4525 (ins VR128:$src1, VR128:$src2),
4527 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4528 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4529 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
4530 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4531 (ins VR128:$src1, i128mem:$src2),
4533 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4534 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4536 (IntId128 VR128:$src1,
4537 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4540 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4541 let isCommutable = 0 in
4542 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
4544 defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
4546 defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
4548 defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
4550 defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
4552 defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
4554 defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
4556 defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
4558 defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
4560 defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
4562 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
4566 let Constraints = "$src1 = $dst" in {
4567 let isCommutable = 0 in
4568 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
4569 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
4570 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
4571 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
4572 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
4573 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
4574 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
4575 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
4576 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
4577 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
4578 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
4581 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
4582 (PCMPEQQrr VR128:$src1, VR128:$src2)>;
4583 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
4584 (PCMPEQQrm VR128:$src1, addr:$src2)>;
4586 /// SS48I_binop_rm - Simple SSE41 binary operator.
4587 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4588 ValueType OpVT, bit Is2Addr = 1> {
4589 let isCommutable = 1 in
4590 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4591 (ins VR128:$src1, VR128:$src2),
4593 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4594 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4595 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
4597 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4598 (ins VR128:$src1, i128mem:$src2),
4600 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4601 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4602 [(set VR128:$dst, (OpNode VR128:$src1,
4603 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
4607 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4608 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
4609 let Constraints = "$src1 = $dst" in
4610 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
4612 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
4613 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
4614 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
4615 X86MemOperand x86memop, bit Is2Addr = 1> {
4616 let isCommutable = 1 in
4617 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
4618 (ins RC:$src1, RC:$src2, i32i8imm:$src3),
4620 !strconcat(OpcodeStr,
4621 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4622 !strconcat(OpcodeStr,
4623 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4624 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
4626 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
4627 (ins RC:$src1, x86memop:$src2, i32i8imm:$src3),
4629 !strconcat(OpcodeStr,
4630 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4631 !strconcat(OpcodeStr,
4632 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4635 (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
4639 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4640 let isCommutable = 0 in {
4641 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
4642 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4643 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
4644 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4645 let Pattern = []<dag> in {
4646 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
4647 VR256, memopv32i8, i256mem, 0>, VEX_4V;
4648 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
4649 VR256, memopv32i8, i256mem, 0>, VEX_4V;
4651 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
4652 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4653 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
4654 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4656 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
4657 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4658 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
4659 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4660 let Pattern = []<dag> in
4661 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
4662 VR256, memopv32i8, i256mem, 0>, VEX_4V;
4665 let Constraints = "$src1 = $dst" in {
4666 let isCommutable = 0 in {
4667 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
4668 VR128, memopv16i8, i128mem>;
4669 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
4670 VR128, memopv16i8, i128mem>;
4671 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
4672 VR128, memopv16i8, i128mem>;
4673 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
4674 VR128, memopv16i8, i128mem>;
4676 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
4677 VR128, memopv16i8, i128mem>;
4678 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
4679 VR128, memopv16i8, i128mem>;
4682 /// SS41I_quaternary_int_avx - AVX SSE 4.1 instructions with 4 operands
4683 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4684 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
4685 RegisterClass RC, X86MemOperand x86memop> {
4686 def rr : I<opc, MRMSrcReg, (outs RC:$dst),
4687 (ins RC:$src1, RC:$src2, RC:$src3),
4688 !strconcat(OpcodeStr,
4689 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4690 [], SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4692 def rm : I<opc, MRMSrcMem, (outs RC:$dst),
4693 (ins RC:$src1, x86memop:$src2, RC:$src3),
4694 !strconcat(OpcodeStr,
4695 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4696 [], SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4700 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem>;
4701 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem>;
4702 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem>;
4703 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem>;
4705 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem>;
4707 /// SS41I_ternary_int - SSE 4.1 ternary operator
4708 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
4709 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4710 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4711 (ins VR128:$src1, VR128:$src2),
4712 !strconcat(OpcodeStr,
4713 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4714 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
4717 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4718 (ins VR128:$src1, i128mem:$src2),
4719 !strconcat(OpcodeStr,
4720 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4723 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
4727 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
4728 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
4729 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
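// Selection rule for the blendv family above (summarized from the SSE4.1
// documentation for reference): for each element, if the most significant
// bit of the corresponding XMM0 element is set, the result element is taken
// from $src2, otherwise from $src1 - hence the implicit Uses = [XMM0].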
4731 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4732 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4733 "vmovntdqa\t{$src, $dst|$dst, $src}",
4734 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4736 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4737 "movntdqa\t{$src, $dst|$dst, $src}",
4738 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4741 //===----------------------------------------------------------------------===//
4742 // SSE4.2 - Compare Instructions
4743 //===----------------------------------------------------------------------===//
4745 /// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
4746 multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
4747 Intrinsic IntId128, bit Is2Addr = 1> {
4748 def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
4749 (ins VR128:$src1, VR128:$src2),
4751 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4752 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4753 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
4755 def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
4756 (ins VR128:$src1, i128mem:$src2),
4758 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4759 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4761 (IntId128 VR128:$src1,
4762 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4765 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4766 defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
4768 let Constraints = "$src1 = $dst" in
4769 defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
4771 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
4772 (PCMPGTQrr VR128:$src1, VR128:$src2)>;
4773 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
4774 (PCMPGTQrm VR128:$src1, addr:$src2)>;
4776 //===----------------------------------------------------------------------===//
4777 // SSE4.2 - String/text Processing Instructions
4778 //===----------------------------------------------------------------------===//
4780 // Packed Compare Implicit Length Strings, Return Mask
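// ("Implicit length" means both strings are terminated by a null element
// inside the 128-bit operands; the "explicit length" forms further down take
// the string lengths in EAX and EDX instead. The mask result is returned in
// XMM0, and the imm8 selects element size, comparison mode and mask
// polarity - summarized here from the SSE4.2 documentation for reference.)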
4781 let Defs = [EFLAGS], usesCustomInserter = 1 in {
4782 def PCMPISTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst),
4783 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4784 "#PCMPISTRM128rr PSEUDO!",
4785 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
4786 imm:$src3))]>, OpSize;
4787 def PCMPISTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst),
4788 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4789 "#PCMPISTRM128rm PSEUDO!",
4790 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
4791 VR128:$src1, (load addr:$src2), imm:$src3))]>, OpSize;
4794 let Defs = [XMM0, EFLAGS], isAsmParserOnly = 1,
4795 Predicates = [HasAVX] in {
4796 def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4797 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4798 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4799 def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4800 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4801 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4804 let Defs = [XMM0, EFLAGS] in {
4805 def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4806 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4807 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4808 def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4809 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4810 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4813 // Packed Compare Explicit Length Strings, Return Mask
4814 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
4815 def PCMPESTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst),
4816 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4817 "#PCMPESTRM128rr PSEUDO!",
4819 (int_x86_sse42_pcmpestrm128
4820 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>, OpSize;
4822 def PCMPESTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst),
4823 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4824 "#PCMPESTRM128rm PSEUDO!",
4825 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4826 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>,
4830 let isAsmParserOnly = 1, Predicates = [HasAVX],
4831 Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4832 def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4833 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4834 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4835 def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4836 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4837 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4840 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4841 def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4842 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4843 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4844 def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4845 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4846 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4849 // Packed Compare Implicit Length Strings, Return Index
4850 let Defs = [ECX, EFLAGS] in {
4851 multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
4852 def rr : SS42AI<0x63, MRMSrcReg, (outs),
4853 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4854 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4855 [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
4856 (implicit EFLAGS)]>, OpSize;
4857 def rm : SS42AI<0x63, MRMSrcMem, (outs),
4858 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4859 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4860 [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
4861 (implicit EFLAGS)]>, OpSize;
4865 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4866 defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
4868 defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
4870 defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
4872 defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
4874 defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
4876 defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
4880 defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
4881 defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
4882 defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
4883 defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
4884 defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
4885 defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
4887 // Packed Compare Explicit Length Strings, Return Index
4888 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
4889 multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
4890 def rr : SS42AI<0x61, MRMSrcReg, (outs),
4891 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4892 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
4893 [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
4894 (implicit EFLAGS)]>, OpSize;
4895 def rm : SS42AI<0x61, MRMSrcMem, (outs),
4896 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4897 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
4899 (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
4900 (implicit EFLAGS)]>, OpSize;
4904 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4905 defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
4907 defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
4909 defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
4911 defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
4913 defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
4915 defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
4919 defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
4920 defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
4921 defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
4922 defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
4923 defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
4924 defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
4926 //===----------------------------------------------------------------------===//
4927 // SSE4.2 - CRC Instructions
4928 //===----------------------------------------------------------------------===//
4930 // No CRC instructions have AVX equivalents
4932 // crc intrinsic instructions
4933 // This set of instructions differs only in the size of the register and
4934 // memory operands.
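// Note for reference: these instructions implement CRC-32C (the Castagnoli
// polynomial 0x11EDC6F41, as used by iSCSI), not the CRC-32 polynomial used
// by zlib/PNG, so they are only suitable for code that explicitly asks for
// CRC-32C, e.g. the crc32 intrinsics matched below.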
4935 let Constraints = "$src1 = $dst" in {
4936 def CRC32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
4937 (ins GR32:$src1, i8mem:$src2),
4938 "crc32{b} \t{$src2, $src1|$src1, $src2}",
4940 (int_x86_sse42_crc32_8 GR32:$src1,
4941 (load addr:$src2)))]>;
4942 def CRC32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
4943 (ins GR32:$src1, GR8:$src2),
4944 "crc32{b} \t{$src2, $src1|$src1, $src2}",
4946 (int_x86_sse42_crc32_8 GR32:$src1, GR8:$src2))]>;
4947 def CRC32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
4948 (ins GR32:$src1, i16mem:$src2),
4949 "crc32{w} \t{$src2, $src1|$src1, $src2}",
4951 (int_x86_sse42_crc32_16 GR32:$src1,
4952 (load addr:$src2)))]>,
4954 def CRC32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
4955 (ins GR32:$src1, GR16:$src2),
4956 "crc32{w} \t{$src2, $src1|$src1, $src2}",
4958 (int_x86_sse42_crc32_16 GR32:$src1, GR16:$src2))]>,
4960 def CRC32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
4961 (ins GR32:$src1, i32mem:$src2),
4962 "crc32{l} \t{$src2, $src1|$src1, $src2}",
4964 (int_x86_sse42_crc32_32 GR32:$src1,
4965 (load addr:$src2)))]>;
4966 def CRC32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
4967 (ins GR32:$src1, GR32:$src2),
4968 "crc32{l} \t{$src2, $src1|$src1, $src2}",
4970 (int_x86_sse42_crc32_32 GR32:$src1, GR32:$src2))]>;
4971 def CRC64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
4972 (ins GR64:$src1, i8mem:$src2),
4973 "crc32{b} \t{$src2, $src1|$src1, $src2}",
4975 (int_x86_sse42_crc64_8 GR64:$src1,
4976 (load addr:$src2)))]>,
4978 def CRC64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
4979 (ins GR64:$src1, GR8:$src2),
4980 "crc32{b} \t{$src2, $src1|$src1, $src2}",
4982 (int_x86_sse42_crc64_8 GR64:$src1, GR8:$src2))]>,
4984 def CRC64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
4985 (ins GR64:$src1, i64mem:$src2),
4986 "crc32{q} \t{$src2, $src1|$src1, $src2}",
4988 (int_x86_sse42_crc64_64 GR64:$src1,
4989 (load addr:$src2)))]>,
4991 def CRC64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
4992 (ins GR64:$src1, GR64:$src2),
4993 "crc32{q} \t{$src2, $src1|$src1, $src2}",
4995 (int_x86_sse42_crc64_64 GR64:$src1, GR64:$src2))]>,
4999 //===----------------------------------------------------------------------===//
5000 // AES-NI Instructions
5001 //===----------------------------------------------------------------------===//
5003 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
5004 Intrinsic IntId128, bit Is2Addr = 1> {
5005 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
5006 (ins VR128:$src1, VR128:$src2),
5008 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5009 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5010 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5012 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
5013 (ins VR128:$src1, i128mem:$src2),
5015 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5016 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5018 (IntId128 VR128:$src1,
5019 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5022 // Perform One Round of an AES Encryption/Decryption Flow
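// (One aesenc round is roughly: state = ShiftRows(state); state =
// SubBytes(state); state = MixColumns(state); dst = state ^ round_key.
// aesenclast skips MixColumns, and aesdec/aesdeclast apply the inverse
// transformations - noted here only as background from the AES-NI
// documentation.)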
5023 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5024 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
5025 int_x86_aesni_aesenc, 0>, VEX_4V;
5026 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
5027 int_x86_aesni_aesenclast, 0>, VEX_4V;
5028 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
5029 int_x86_aesni_aesdec, 0>, VEX_4V;
5030 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
5031 int_x86_aesni_aesdeclast, 0>, VEX_4V;
5034 let Constraints = "$src1 = $dst" in {
5035 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
5036 int_x86_aesni_aesenc>;
5037 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
5038 int_x86_aesni_aesenclast>;
5039 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
5040 int_x86_aesni_aesdec>;
5041 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
5042 int_x86_aesni_aesdeclast>;
5045 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
5046 (AESENCrr VR128:$src1, VR128:$src2)>;
5047 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
5048 (AESENCrm VR128:$src1, addr:$src2)>;
5049 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
5050 (AESENCLASTrr VR128:$src1, VR128:$src2)>;
5051 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
5052 (AESENCLASTrm VR128:$src1, addr:$src2)>;
5053 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
5054 (AESDECrr VR128:$src1, VR128:$src2)>;
5055 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
5056 (AESDECrm VR128:$src1, addr:$src2)>;
5057 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
5058 (AESDECLASTrr VR128:$src1, VR128:$src2)>;
5059 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
5060 (AESDECLASTrm VR128:$src1, addr:$src2)>;
5062 // Perform the AES InvMixColumn Transformation
5063 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5064 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5066 "vaesimc\t{$src1, $dst|$dst, $src1}",
5068 (int_x86_aesni_aesimc VR128:$src1))]>,
5070 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5071 (ins i128mem:$src1),
5072 "vaesimc\t{$src1, $dst|$dst, $src1}",
5074 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5077 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5079 "aesimc\t{$src1, $dst|$dst, $src1}",
5081 (int_x86_aesni_aesimc VR128:$src1))]>,
5083 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5084 (ins i128mem:$src1),
5085 "aesimc\t{$src1, $dst|$dst, $src1}",
5087 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5090 // AES Round Key Generation Assist
5091 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5092 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5093 (ins VR128:$src1, i8imm:$src2),
5094 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5096 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5098 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5099 (ins i128mem:$src1, i8imm:$src2),
5100 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5102 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5106 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5107 (ins VR128:$src1, i8imm:$src2),
5108 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5110 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5112 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5113 (ins i128mem:$src1, i8imm:$src2),
5114 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5116 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5120 //===----------------------------------------------------------------------===//
5121 // CLMUL Instructions
5122 //===----------------------------------------------------------------------===//
5124 // Only the AVX versions of the CLMUL instructions are described here.
5126 // Carry-less Multiplication instructions
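// (Background, not encoded in the definitions below: pclmulqdq carry-lessly
// multiplies one 64-bit half of each source into a 128-bit result; imm[0]
// picks the qword of the first source and imm[4] the qword of the second.
// The pseudo mnemonics defined below correspond to the immediates 0x00,
// 0x01, 0x10 and 0x11.)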
5127 let isAsmParserOnly = 1 in {
5128 def VPCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
5129 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5130 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5133 def VPCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
5134 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5135 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5139 multiclass avx_vpclmul<string asm> {
5140 def rr : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
5141 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5144 def rm : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
5145 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5148 defm VPCLMULHQHQDQ : avx_vpclmul<"vpclmulhqhqdq">;
5149 defm VPCLMULHQLQDQ : avx_vpclmul<"vpclmulhqlqdq">;
5150 defm VPCLMULLQHQDQ : avx_vpclmul<"vpclmullqhqdq">;
5151 defm VPCLMULLQLQDQ : avx_vpclmul<"vpclmullqlqdq">;
5153 } // isAsmParserOnly
5155 //===----------------------------------------------------------------------===//
5156 // AVX Instructions
5157 //===----------------------------------------------------------------------===//
5159 let isAsmParserOnly = 1 in {
5161 // Load from memory and broadcast to all elements of the destination operand
5162 class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
5163 X86MemOperand x86memop> :
5164 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
5165 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>, VEX;
5167 def VBROADCASTSS : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem>;
5168 def VBROADCASTSSY : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem>;
5169 def VBROADCASTSD : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem>;
5170 def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem>;
5172 // Insert packed floating-point values
5173 def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
5174 (ins VR256:$src1, VR128:$src2, i8imm:$src3),
5175 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5177 def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
5178 (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
5179 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5182 // Extract packed floating-point values
5183 def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
5184 (ins VR256:$src1, i8imm:$src2),
5185 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5187 def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
5188 (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
5189 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5192 // Conditional SIMD Packed Loads and Stores
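// (vmaskmovps/vmaskmovpd use the sign bit of each mask element to decide,
// per element, whether to load or store; masked-off load elements are
// zeroed and masked-off store elements leave memory untouched - summarized
// from the AVX documentation for reference.)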
5193 multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr> {
5194 def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
5195 (ins VR128:$src1, f128mem:$src2),
5196 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5198 def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
5199 (ins VR256:$src1, f256mem:$src2),
5200 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5202 def mr : AVX8I<opc_mr, MRMDestMem, (outs),
5203 (ins f128mem:$dst, VR128:$src1, VR128:$src2),
5204 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5206 def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
5207 (ins f256mem:$dst, VR256:$src1, VR256:$src2),
5208 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5212 defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps">;
5213 defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd">;
5215 // Permute Floating-Point Values
5216 multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
5217 RegisterClass RC, X86MemOperand x86memop> {
5218 def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
5219 (ins RC:$src1, RC:$src2),
5220 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5222 def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
5223 (ins RC:$src1, x86memop:$src2),
5224 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5226 def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
5227 (ins RC:$src1, i8imm:$src2),
5228 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5230 def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
5231 (ins x86memop:$src1, i8imm:$src2),
5232 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5236 defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem>;
5237 defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem>;
5238 defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem>;
5239 defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem>;
5241 def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
5242 (ins VR256:$src1, VR256:$src2, i8imm:$src3),
5243 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5245 def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
5246 (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
5247 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5250 // Zero All YMM registers
5251 def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall", []>, VEX, VEX_L,
5254 // Zero the upper bits of the YMM registers
5255 def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper", []>, VEX,
5258 } // isAsmParserOnly